/*
 * Copyright(c) 2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/radix-tree.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/pfn_t.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/memory_hotplug.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#ifndef ioremap_cache
/* temporary while we convert existing ioremap_cache users to memremap */
__weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)
{
	return ioremap(offset, size);
}
#endif

#ifndef arch_memremap_wb
static void *arch_memremap_wb(resource_size_t offset, unsigned long size)
{
	return (__force void *)ioremap_cache(offset, size);
}
#endif

#ifndef arch_memremap_can_ram_remap
static bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
					unsigned long flags)
{
	return true;
}
#endif

static void *try_ram_remap(resource_size_t offset, size_t size,
			   unsigned long flags)
{
	unsigned long pfn = PHYS_PFN(offset);

	/* In the simple case just return the existing linear address */
	if (pfn_valid(pfn) && !PageHighMem(pfn_to_page(pfn)) &&
	    arch_memremap_can_ram_remap(offset, size, flags))
		return __va(offset);

	return NULL; /* fallback to arch_memremap_wb */
}

/**
 * memremap() - remap an iomem_resource as cacheable memory
 * @offset: iomem resource start address
 * @size: size of remap
 * @flags: any of MEMREMAP_WB, MEMREMAP_WT, MEMREMAP_WC,
 *	   MEMREMAP_ENC, MEMREMAP_DEC
 *
 * memremap() is "ioremap" for cases where it is known that the resource
 * being mapped does not have i/o side effects and the __iomem
 * annotation is not applicable. In the case of multiple flags, the different
 * mapping types will be attempted in the order listed below until one of
 * them succeeds.
 *
 * MEMREMAP_WB - matches the default mapping for System RAM on
 * the architecture. This is usually a read-allocate write-back cache.
 * Moreover, if MEMREMAP_WB is specified and the requested remap region is RAM,
 * memremap() will bypass establishing a new mapping and instead return
 * a pointer into the direct map.
 *
 * MEMREMAP_WT - establish a mapping whereby writes either bypass the
 * cache or are written through to memory and never exist in a
 * cache-dirty state with respect to program visibility. Attempts to
 * map System RAM with this mapping type will fail.
 *
 * MEMREMAP_WC - establish a writecombine mapping, whereby writes may
 * be coalesced together (e.g. in the CPU's write buffers), but is otherwise
 * uncached. Attempts to map System RAM with this mapping type will fail.
 */
void *memremap(resource_size_t offset, size_t size, unsigned long flags)
{
	int is_ram = region_intersects(offset, size,
				       IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
	void *addr = NULL;

	if (!flags)
		return NULL;

	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "memremap attempted on mixed range %pa size: %#lx\n",
				&offset, (unsigned long) size);
		return NULL;
	}

	/* Try all mapping types requested until one returns non-NULL */
	if (flags & MEMREMAP_WB) {
		/*
		 * MEMREMAP_WB is special in that it can be satisfied
		 * from the direct map. Some archs depend on the
		 * capability of memremap() to autodetect cases where
		 * the requested range is potentially in System RAM.
		 */
		if (is_ram == REGION_INTERSECTS)
			addr = try_ram_remap(offset, size, flags);
		if (!addr)
			addr = arch_memremap_wb(offset, size);
	}

	/*
	 * If we don't have a mapping yet and other request flags are
	 * present then we will be attempting to establish a new virtual
	 * address mapping. Enforce that this mapping is not aliasing
	 * System RAM.
	 */
	if (!addr && is_ram == REGION_INTERSECTS && flags != MEMREMAP_WB) {
		WARN_ONCE(1, "memremap attempted on ram %pa size: %#lx\n",
				&offset, (unsigned long) size);
		return NULL;
	}

	if (!addr && (flags & MEMREMAP_WT))
		addr = ioremap_wt(offset, size);

	if (!addr && (flags & MEMREMAP_WC))
		addr = ioremap_wc(offset, size);

	return addr;
}
EXPORT_SYMBOL(memremap);
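
/*
 * Example usage (a minimal sketch, not part of this file): a driver that
 * knows a firmware table lives at a given physical address can map it with
 * memremap() and access it through an ordinary pointer. The address macro
 * and struct name below are hypothetical.
 *
 *	struct acme_fw_table *tbl;
 *
 *	tbl = memremap(ACME_FW_TABLE_PHYS, sizeof(*tbl), MEMREMAP_WB);
 *	if (!tbl)
 *		return -ENOMEM;
 *	... access tbl like normal memory, no readl()/writel() needed ...
 *	memunmap(tbl);
 */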

void memunmap(void *addr)
{
	if (is_vmalloc_addr(addr))
		iounmap((void __iomem *) addr);
}
EXPORT_SYMBOL(memunmap);

static void devm_memremap_release(struct device *dev, void *res)
{
	memunmap(*(void **)res);
}

static int devm_memremap_match(struct device *dev, void *res, void *match_data)
{
	return *(void **)res == match_data;
}

void *devm_memremap(struct device *dev, resource_size_t offset,
		size_t size, unsigned long flags)
{
	void **ptr, *addr;

	ptr = devres_alloc_node(devm_memremap_release, sizeof(*ptr), GFP_KERNEL,
			dev_to_node(dev));
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	addr = memremap(offset, size, flags);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
		return ERR_PTR(-ENXIO);
	}

	return addr;
}
EXPORT_SYMBOL(devm_memremap);
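
/*
 * Example usage (a hedged sketch): the devm_ variant ties the mapping's
 * lifetime to @dev, so a probe routine needs no explicit unmap on its
 * error paths. The resource and device variables are hypothetical.
 *
 *	void *base;
 *
 *	base = devm_memremap(dev, res->start, resource_size(res),
 *			MEMREMAP_WB);
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 *	... base is unmapped automatically on driver detach ...
 */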

void devm_memunmap(struct device *dev, void *addr)
{
	WARN_ON(devres_release(dev, devm_memremap_release,
				devm_memremap_match, addr));
}
EXPORT_SYMBOL(devm_memunmap);

#ifdef CONFIG_ZONE_DEVICE
static DEFINE_MUTEX(pgmap_lock);
static RADIX_TREE(pgmap_radix, GFP_KERNEL);
#define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
#define SECTION_SIZE (1UL << PA_SECTION_SHIFT)

static unsigned long order_at(struct resource *res, unsigned long pgoff)
{
	unsigned long phys_pgoff = PHYS_PFN(res->start) + pgoff;
	unsigned long nr_pages, mask;

	nr_pages = PHYS_PFN(resource_size(res));
	if (nr_pages == pgoff)
		return ULONG_MAX;

	/*
	 * What is the largest aligned power-of-2 range available from
	 * this resource pgoff to the end of the resource range,
	 * considering the alignment of the current pgoff?
	 */
	mask = phys_pgoff | rounddown_pow_of_two(nr_pages - pgoff);
	if (!mask)
		return ULONG_MAX;

	return find_first_bit(&mask, BITS_PER_LONG);
}

#define foreach_order_pgoff(res, order, pgoff) \
	for (pgoff = 0, order = order_at((res), pgoff); order < ULONG_MAX; \
			pgoff += 1UL << order, order = order_at((res), pgoff))
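
/*
 * Worked example (illustrative values, not from the original source): for
 * a resource starting at pfn 0x100 and spanning 0x300 pages,
 * foreach_order_pgoff() yields (pgoff 0, order 8) covering pfns
 * 0x100-0x1ff, then (pgoff 0x100, order 9) covering pfns 0x200-0x3ff.
 * The range thus decomposes into naturally aligned power-of-2 chunks,
 * which is what multi-order radix tree entries require.
 */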

#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
int device_private_entry_fault(struct vm_area_struct *vma,
		       unsigned long addr,
		       swp_entry_t entry,
		       unsigned int flags,
		       pmd_t *pmdp)
{
	struct page *page = device_private_entry_to_page(entry);

	/*
	 * The page_fault() callback must migrate the page back to system
	 * memory so that the CPU can access it. This might fail for various
	 * reasons (device issue, device was unsafely unplugged, ...). When
	 * such error conditions happen, the callback must return
	 * VM_FAULT_SIGBUS.
	 *
	 * Note that because memory cgroup charges are accounted to the device
	 * memory, this should never fail because of memory restrictions (but
	 * allocation of a regular system page might still fail because we are
	 * out of memory).
	 *
	 * There is a more in-depth description of what that callback can and
	 * cannot do, in include/linux/memremap.h.
	 */
	return page->pgmap->page_fault(vma, addr, page, flags, pmdp);
}
EXPORT_SYMBOL(device_private_entry_fault);
#endif /* CONFIG_DEVICE_PRIVATE */
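
/*
 * For illustration, a device driver's page_fault() callback might look
 * like the hypothetical sketch below (only the signature is dictated by
 * the call above; the migration helper is made up):
 *
 *	static int acme_devmem_fault(struct vm_area_struct *vma,
 *			unsigned long addr, const struct page *page,
 *			unsigned int flags, pmd_t *pmdp)
 *	{
 *		if (acme_migrate_to_ram(page, vma, addr))
 *			return VM_FAULT_SIGBUS;
 *		return 0;
 *	}
 */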

static void pgmap_radix_release(struct resource *res, unsigned long end_pgoff)
{
	unsigned long pgoff, order;

	mutex_lock(&pgmap_lock);
	foreach_order_pgoff(res, order, pgoff) {
		if (pgoff >= end_pgoff)
			break;
		radix_tree_delete(&pgmap_radix, PHYS_PFN(res->start) + pgoff);
	}
	mutex_unlock(&pgmap_lock);

	synchronize_rcu();
}

static unsigned long pfn_first(struct dev_pagemap *pgmap)
{
	const struct resource *res = &pgmap->res;
	struct vmem_altmap *altmap = &pgmap->altmap;
	unsigned long pfn;

	pfn = res->start >> PAGE_SHIFT;
	if (pgmap->altmap_valid)
		pfn += vmem_altmap_offset(altmap);
	return pfn;
}

static unsigned long pfn_end(struct dev_pagemap *pgmap)
{
	const struct resource *res = &pgmap->res;

	return (res->start + resource_size(res)) >> PAGE_SHIFT;
}

#define for_each_device_pfn(pfn, map) \
	for (pfn = pfn_first(map); pfn < pfn_end(map); pfn++)

static void devm_memremap_pages_release(void *data)
{
	struct dev_pagemap *pgmap = data;
	struct device *dev = pgmap->dev;
	struct resource *res = &pgmap->res;
	resource_size_t align_start, align_size;
	unsigned long pfn;

	for_each_device_pfn(pfn, pgmap)
		put_page(pfn_to_page(pfn));

	if (percpu_ref_tryget_live(pgmap->ref)) {
		dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
		percpu_ref_put(pgmap->ref);
	}

	/* pages are dead and unused, undo the arch mapping */
	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
		- align_start;

	mem_hotplug_begin();
	arch_remove_memory(align_start, align_size, pgmap->altmap_valid ?
			&pgmap->altmap : NULL);
	mem_hotplug_done();

	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
	pgmap_radix_release(res, -1);
	dev_WARN_ONCE(dev, pgmap->altmap.alloc,
		      "%s: failed to free all reserved pages\n", __func__);
}

/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @res
 * @pgmap: pointer to a struct dev_pagemap
 *
 * Notes:
 * 1/ At a minimum the res, ref and type members of @pgmap must be initialized
 *    by the caller before passing it to this function
 *
 * 2/ The altmap field may optionally be initialized, in which case altmap_valid
 *    must be set to true
 *
 * 3/ pgmap.ref must be 'live' on entry and 'dead' before devm_memunmap_pages()
 *    time (or devm release event). The expected order of events is that ref has
 *    been through percpu_ref_kill() before devm_memremap_pages_release(). The
 *    wait for the completion of all references being dropped and
 *    percpu_ref_exit() must occur after devm_memremap_pages_release().
 *
 * 4/ res is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 */
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
	resource_size_t align_start, align_size, align_end;
	struct vmem_altmap *altmap = pgmap->altmap_valid ?
			&pgmap->altmap : NULL;
	unsigned long pfn, pgoff, order;
	pgprot_t pgprot = PAGE_KERNEL;
	int error, nid, is_ram, i = 0;
	struct resource *res = &pgmap->res;

	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
		- align_start;
	is_ram = region_intersects(align_start, align_size,
		IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);

	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "%s attempted on mixed region %pr\n",
				__func__, res);
		return ERR_PTR(-ENXIO);
	}

	if (is_ram == REGION_INTERSECTS)
		return __va(res->start);

	if (!pgmap->ref)
		return ERR_PTR(-EINVAL);

	pgmap->dev = dev;

	mutex_lock(&pgmap_lock);
	error = 0;
	align_end = align_start + align_size - 1;

	foreach_order_pgoff(res, order, pgoff) {
		error = __radix_tree_insert(&pgmap_radix,
				PHYS_PFN(res->start) + pgoff, order, pgmap);
		if (error) {
			dev_err(dev, "%s: failed: %d\n", __func__, error);
			break;
		}
	}
	mutex_unlock(&pgmap_lock);
	if (error)
		goto err_radix;

	nid = dev_to_node(dev);
	if (nid < 0)
		nid = numa_mem_id();

	error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(align_start), 0,
			align_size);
	if (error)
		goto err_pfn_remap;

	mem_hotplug_begin();
	error = arch_add_memory(nid, align_start, align_size, altmap, false);
	if (!error)
		move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
					align_start >> PAGE_SHIFT,
					align_size >> PAGE_SHIFT, altmap);
	mem_hotplug_done();
	if (error)
		goto err_add_memory;

	for_each_device_pfn(pfn, pgmap) {
		struct page *page = pfn_to_page(pfn);

		/*
		 * ZONE_DEVICE pages union ->lru with a ->pgmap back
		 * pointer. It is a bug if a ZONE_DEVICE page is ever
		 * freed or placed on a driver-private list. Seed the
		 * storage with LIST_POISON* values.
		 */
		list_del(&page->lru);
		page->pgmap = pgmap;
		percpu_ref_get(pgmap->ref);
		if (!(++i % 1024))
			cond_resched();
	}

	devm_add_action(dev, devm_memremap_pages_release, pgmap);

	return __va(res->start);

 err_add_memory:
	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
 err_pfn_remap:
 err_radix:
	pgmap_radix_release(res, pgoff);
	return ERR_PTR(error);
}
EXPORT_SYMBOL(devm_memremap_pages);
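
/*
 * Example usage (a hedged sketch of a typical caller such as a pmem-style
 * driver; the embedding structure and percpu-ref setup are hypothetical):
 *
 *	pgmap->res = *res;		(host memory range to back with pages)
 *	pgmap->ref = &drv->ref;		(a live percpu_ref owned by the driver)
 *	pgmap->type = ...;		(an appropriate memory_type value)
 *	pgmap->altmap_valid = false;
 *
 *	addr = devm_memremap_pages(dev, pgmap);
 *	if (IS_ERR(addr))
 *		return PTR_ERR(addr);
 *
 * On success pfn_to_page() is valid for every pfn in the range and the
 * mapping is torn down automatically at device release time.
 */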

unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
	/* number of pfns from base where pfn_to_page() is valid */
	return altmap->reserve + altmap->free;
}

void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
{
	altmap->alloc -= nr_pfns;
}

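/*
 * Illustration (assumed layout, see struct vmem_altmap in
 * include/linux/memremap.h): an altmap carves the head of the device
 * range itself out as backing storage for the struct page array instead
 * of consuming regular System RAM:
 *
 *	base_pfn: [ reserve | free (memmap allocated here) | data pages ]
 *
 * which is why vmem_altmap_offset() reports reserve + free as the first
 * pfn for which pfn_to_page() is valid.
 */
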
/**
 * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
 * @pfn: page frame number to look up
 * @pgmap: optional known pgmap that already has a reference
 *
 * If @pgmap is non-NULL and covers @pfn it will be returned as-is. If @pgmap
 * is non-NULL but does not cover @pfn the reference to it will be released.
 */
struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
		struct dev_pagemap *pgmap)
{
	resource_size_t phys = PFN_PHYS(pfn);

	/*
	 * In the cached case we're already holding a live reference.
	 */
	if (pgmap) {
		if (phys >= pgmap->res.start && phys <= pgmap->res.end)
			return pgmap;
		put_dev_pagemap(pgmap);
	}

	/* fall back to slow path lookup */
	rcu_read_lock();
	pgmap = radix_tree_lookup(&pgmap_radix, PHYS_PFN(phys));
	if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
		pgmap = NULL;
	rcu_read_unlock();

	return pgmap;
}
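
/*
 * Example usage (a minimal sketch): callers walking a range of pfns can
 * pass the previous result back in as a cache, and must drop the final
 * reference themselves:
 *
 *	struct dev_pagemap *pgmap = NULL;
 *
 *	for (pfn = start; pfn < end; pfn++) {
 *		pgmap = get_dev_pagemap(pfn, pgmap);
 *		if (!pgmap)
 *			break;	(pfn is not device memory)
 *		... use pfn_to_page(pfn) ...
 *	}
 *	if (pgmap)
 *		put_dev_pagemap(pgmap);
 */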
#endif /* CONFIG_ZONE_DEVICE */

#if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC)
void put_zone_device_private_or_public_page(struct page *page)
{
	int count = page_ref_dec_return(page);

	/*
	 * If the refcount is 1 then the page is now free and the refcount is
	 * stable because nobody else holds a reference on the page.
	 */
	if (count == 1) {
		/* Clear Active bit in case of parallel mark_page_accessed */
		__ClearPageActive(page);
		__ClearPageWaiters(page);

		page->mapping = NULL;
		mem_cgroup_uncharge(page);

		page->pgmap->page_free(page, page->pgmap->data);
	} else if (!count)
		__put_page(page);
}
EXPORT_SYMBOL(put_zone_device_private_or_public_page);
#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */