// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <[email protected]>
 *	Michal Nazarewicz <[email protected]>
 *	Aneesh Kumar K.V <[email protected]>
 *	Joonsoo Kim <[email protected]>
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
# define DEBUG
#endif
#endif
#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>
#include <trace/events/cma.h>

#include "cma.h"

struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(const struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

const char *cma_get_name(const struct cma *cma)
{
	return cma->name;
}

static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     unsigned int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}

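/*
 * Worked example (added for illustration; not in the original source):
 * with order_per_bit == 0 and align_order == 8 (a 256-page alignment),
 * the mask is (1UL << 8) - 1 = 0xff. cma_alloc() passes it to
 * bitmap_find_next_zero_area_off() so the chosen bitmap position,
 * corrected by the offset computed below, is a multiple of 256 pages.
 * When align_order <= order_per_bit, one bitmap bit already spans the
 * requested alignment and no mask is needed.
 */
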
/*
 * Find the offset of the base PFN from the specified align_order.
 * The value returned is represented in order_per_bits.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       unsigned int align_order)
{
	return (cma->base_pfn & ((1UL << align_order) - 1))
		>> cma->order_per_bit;
}

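/*
 * Worked example (added for illustration; not in the original source):
 * bit 0 of the bitmap stands for base_pfn, which need not itself be
 * aligned to align_order. With base_pfn == 0x2f800, align_order == 12
 * and order_per_bit == 0, the offset is 0x2f800 & 0xfff = 0x800 bits,
 * which the search adds so that returned ranges still start on PFNs
 * that are multiples of 1 << align_order.
 */
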
static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}

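/*
 * Worked example (added for illustration; not in the original source):
 * with order_per_bit == 2 each bitmap bit covers four pages, so a
 * request for five pages rounds up to ALIGN(5, 4) = 8 pages, i.e.
 * 8 >> 2 = 2 bitmap bits.
 */
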
static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
			     unsigned long count)
{
	unsigned long bitmap_no, bitmap_count;
	unsigned long flags;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	spin_lock_irqsave(&cma->lock, flags);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	spin_unlock_irqrestore(&cma->lock, flags);
}

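/*
 * Worked example (added for illustration; not in the original source):
 * for an area with base_pfn == 0x80000 and order_per_bit == 2, freeing
 * 64 pages at pfn == 0x80400 clears bitmap_count = 64 >> 2 = 16 bits
 * starting at bitmap_no = (0x80400 - 0x80000) >> 2 = 0x100.
 */
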
static void __init cma_activate_area(struct cma *cma)
{
	unsigned long base_pfn = cma->base_pfn, pfn;
	struct zone *zone;

	cma->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma), GFP_KERNEL);
	if (!cma->bitmap)
		goto out_error;

	/*
	 * alloc_contig_range() requires the pfn range specified to be in the
	 * same zone. Simplify by forcing the entire CMA resv range to be in the
	 * same zone.
	 */
	WARN_ON_ONCE(!pfn_valid(base_pfn));
	zone = page_zone(pfn_to_page(base_pfn));
	for (pfn = base_pfn + 1; pfn < base_pfn + cma->count; pfn++) {
		WARN_ON_ONCE(!pfn_valid(pfn));
		if (page_zone(pfn_to_page(pfn)) != zone)
			goto not_in_zone;
	}

	for (pfn = base_pfn; pfn < base_pfn + cma->count;
	     pfn += pageblock_nr_pages)
		init_cma_reserved_pageblock(pfn_to_page(pfn));

	spin_lock_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif

	return;

not_in_zone:
	bitmap_free(cma->bitmap);
out_error:
	/* Expose all pages to the buddy, they are useless for CMA. */
	if (!cma->reserve_pages_on_error) {
		for (pfn = base_pfn; pfn < base_pfn + cma->count; pfn++)
			free_reserved_page(pfn_to_page(pfn));
	}
	totalcma_pages -= cma->count;
	cma->count = 0;
	pr_err("CMA area %s could not be activated\n", cma->name);
	return;
}

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++)
		cma_activate_area(&cma_areas[i]);

	return 0;
}
core_initcall(cma_init_reserved_areas);

void __init cma_reserve_pages_on_error(struct cma *cma)
{
	cma->reserve_pages_on_error = true;
}

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area
 * @size: Size of the reserved area (in bytes).
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @name: The name of the area. If this parameter is NULL, the name of
 *        the area will be set to "cmaN", where N is a running counter of
 *        used areas.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates a custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 const char *name,
				 struct cma **res_cma)
{
	struct cma *cma;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/* alignment should be aligned with order_per_bit */
	if (!IS_ALIGNED(CMA_MIN_ALIGNMENT_PAGES, 1 << order_per_bit))
		return -EINVAL;

	/* ensure minimal alignment required by mm core */
	if (!IS_ALIGNED(base | size, CMA_MIN_ALIGNMENT_BYTES))
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];

	if (name)
		snprintf(cma->name, CMA_MAX_NAME, "%s", name);
	else
		snprintf(cma->name, CMA_MAX_NAME, "cma%d", cma_area_count);

	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += (size / PAGE_SIZE);

	return 0;
}

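/*
 * Illustrative usage sketch (not part of the original source; the base
 * address, size and names are hypothetical). A caller that has already
 * reserved a suitably aligned range via memblock can hand it to CMA:
 *
 *	struct cma *my_cma;
 *	phys_addr_t rbase = 0x88000000, rsize = SZ_64M;
 *
 *	memblock_reserve(rbase, rsize);
 *	if (cma_init_reserved_mem(rbase, rsize, 0, "mycma", &my_cma))
 *		pr_warn("mycma registration failed\n");
 */
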
/**
 * cma_declare_contiguous_nid() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any)
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be power of 2 or zero
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: hint about where to place the reserved area
 * @name: The name of the area. See function cma_init_reserved_mem()
 * @res_cma: Pointer to store the created cma region.
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. It allows the creation of custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base. If false,
 * reserve in range from @base to @limit.
 */
int __init cma_declare_contiguous_nid(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma,
			int nid)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret = 0;

	/*
	 * We can't use __pa(high_memory) directly, since high_memory
	 * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
	 * complain. Find the boundary by adding one to the last valid
	 * address.
	 */
	highmem_start = __pa(high_memory - 1) + 1;
	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		__func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/* Sanitise input arguments. */
	alignment = max_t(phys_addr_t, alignment, CMA_MIN_ALIGNMENT_BYTES);
	if (fixed && base & (alignment - 1)) {
		ret = -EINVAL;
		pr_err("Region at %pa must be aligned to %pa bytes\n",
			&base, &alignment);
		goto err;
	}
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base the request region must not cross the
	 * low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	if (base + size > limit) {
		ret = -EINVAL;
		pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n",
			&size, &base, &limit);
		goto err;
	}

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (base < highmem_start && limit > highmem_start) {
			addr = memblock_alloc_range_nid(size, alignment,
					highmem_start, limit, nid, true);
			limit = highmem_start;
		}

		/*
		 * If there is enough memory, try a bottom-up allocation first.
		 * It will place the new cma area close to the start of the node
		 * and guarantee that the compaction is moving pages out of the
		 * cma area and not into it.
		 * Avoid using first 4GB to not interfere with constrained zones
		 * like DMA/DMA32.
		 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
		if (!memblock_bottom_up() && memblock_end >= SZ_4G + size) {
			memblock_set_bottom_up(true);
			addr = memblock_alloc_range_nid(size, alignment, SZ_4G,
							limit, nid, true);
			memblock_set_bottom_up(false);
		}
#endif

		if (!addr) {
			addr = memblock_alloc_range_nid(size, alignment, base,
					limit, nid, true);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects but this address isn't mapped and accessible
		 */
		kmemleak_ignore_phys(addr);
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
	if (ret)
		goto free_mem;

	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
	return 0;

free_mem:
	memblock_phys_free(base, size);
err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}

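/*
 * Illustrative usage sketch (not part of the original source; the area
 * name and size are hypothetical). A typical boot-time caller lets CMA
 * choose the placement by passing zero base, limit and alignment:
 *
 *	struct cma *camera_cma;
 *	int err;
 *
 *	err = cma_declare_contiguous_nid(0, SZ_128M, 0, 0, 0, false,
 *					 "camera", &camera_cma,
 *					 NUMA_NO_NODE);
 *	if (err)
 *		pr_warn("camera area not reserved (%d)\n", err);
 */
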
#ifdef CONFIG_CMA_DEBUG
static void cma_debug_show_areas(struct cma *cma)
{
	unsigned long next_zero_bit, next_set_bit, nr_zero;
	unsigned long start = 0;
	unsigned long nr_part, nr_total = 0;
	unsigned long nbits = cma_bitmap_maxno(cma);

	spin_lock_irq(&cma->lock);
	pr_info("number of available pages: ");
	for (;;) {
		next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start);
		if (next_zero_bit >= nbits)
			break;
		next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit);
		nr_zero = next_set_bit - next_zero_bit;
		nr_part = nr_zero << cma->order_per_bit;
		pr_cont("%s%lu@%lu", nr_total ? "+" : "", nr_part,
			next_zero_bit);
		nr_total += nr_part;
		start = next_zero_bit + nr_zero;
	}
	pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count);
	spin_unlock_irq(&cma->lock);
}
#else
static inline void cma_debug_show_areas(struct cma *cma) { }
#endif

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma: Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @no_warn: Avoid printing message about failed allocation
 *
 * This function allocates part of a specific contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, unsigned long count,
		       unsigned int align, bool no_warn)
{
	unsigned long mask, offset;
	unsigned long pfn = -1;
	unsigned long start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	unsigned long i;
	struct page *page = NULL;
	int ret = -ENOMEM;

	if (!cma || !cma->count || !cma->bitmap)
		goto out;

	pr_debug("%s(cma %p, count %lu, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		goto out;

	trace_cma_alloc_start(cma->name, count, align);

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	if (bitmap_count > bitmap_maxno)
		goto out;

	for (;;) {
		spin_lock_irq(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			spin_unlock_irq(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		spin_unlock_irq(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
				GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));

		trace_cma_alloc_busy_retry(cma->name, pfn, pfn_to_page(pfn),
					   count, align);
		/* try again with a bit different memory target */
		start = bitmap_no + mask + 1;
	}

	trace_cma_alloc_finish(cma->name, pfn, page, count, align);

	/*
	 * CMA can allocate multiple page blocks, which results in different
	 * blocks being marked with different tags. Reset the tags to ignore
	 * those page blocks.
	 */
	if (page) {
		for (i = 0; i < count; i++)
			page_kasan_tag_reset(page + i);
	}

	if (ret && !no_warn) {
		pr_err_ratelimited("%s: %s: alloc failed, req-size: %lu pages, ret: %d\n",
				   __func__, cma->name, count, ret);
		cma_debug_show_areas(cma);
	}

	pr_debug("%s(): returned %p\n", __func__, page);
out:
	if (page) {
		count_vm_event(CMA_ALLOC_SUCCESS);
		cma_sysfs_account_success_pages(cma, count);
	} else {
		count_vm_event(CMA_ALLOC_FAIL);
		if (cma)
			cma_sysfs_account_fail_pages(cma, count);
	}

	return page;
}

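/*
 * Illustrative usage sketch (not part of the original source; "my_cma"
 * is a hypothetical area set up at boot). Allocating 64 pages aligned
 * to a 64-page boundary (order 6), then releasing them:
 *
 *	struct page *page;
 *
 *	page = cma_alloc(my_cma, 64, 6, false);
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	cma_release(my_cma, page, 64);
 */
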
bool cma_pages_valid(struct cma *cma, const struct page *pages,
		     unsigned long count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count) {
		pr_debug("%s(page %p, count %lu)\n", __func__,
			 (void *)pages, count);
		return false;
	}

	return true;
}

/**
 * cma_release() - release allocated pages
 * @cma: Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the
 * contiguous area and true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages,
		 unsigned long count)
{
	unsigned long pfn;

	if (!cma_pages_valid(cma, pages, count))
		return false;

	pr_debug("%s(page %p, count %lu)\n", __func__, (void *)pages, count);

	pfn = page_to_pfn(pages);

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);
	trace_cma_release(cma->name, pfn, pages, count);

	return true;
}

int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = it(&cma_areas[i], data);

		if (ret)
			return ret;
	}

	return 0;
}
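
/*
 * Illustrative usage sketch (not part of the original source; the
 * callback name is hypothetical). Walking every registered area:
 *
 *	static int cma_print_one(struct cma *cma, void *data)
 *	{
 *		phys_addr_t base = cma_get_base(cma);
 *
 *		pr_info("%s: base %pa, size %lu bytes\n",
 *			cma_get_name(cma), &base, cma_get_size(cma));
 *		return 0;
 *	}
 *
 *	cma_for_each_area(cma_print_one, NULL);
 */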