/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version.
 */
#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif
#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>
#include <trace/events/cma.h>

#include "cma.h"
struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);
phys_addr_t cma_get_base(const struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

const char *cma_get_name(const struct cma *cma)
{
	return cma->name ? cma->name : "(undefined)";
}
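
/*
 * Usage sketch (illustrative, not part of the original file): with 4 KiB
 * pages, an area of 4096 pages reports cma_get_size() == 4096 << 12,
 * i.e. 16 MiB:
 *
 *	phys_addr_t base = cma_get_base(cma);
 *
 *	pr_info("%s: base %pa, size %lu bytes\n",
 *		cma_get_name(cma), &base, cma_get_size(cma));
 */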
static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     unsigned int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}
/*
 * Find the offset of the base PFN from the specified align_order.
 * The value returned is represented in order_per_bits.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       unsigned int align_order)
{
	return (cma->base_pfn & ((1UL << align_order) - 1))
		>> cma->order_per_bit;
}
static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}
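
/*
 * Worked example (illustrative, not part of the original file): with
 * order_per_bit == 0 every bitmap bit covers one page, so for
 * align_order == 8 (1 MiB alignment on 4 KiB pages):
 *
 *	cma_bitmap_aligned_mask(cma, 8)    == (1UL << 8) - 1 == 0xff
 *	cma_bitmap_aligned_offset(cma, 8)  == base_pfn % 256 (in bits)
 *	cma_bitmap_pages_to_bits(cma, 301) == 301
 *
 * With order_per_bit == 2 (one bit per four pages) the same request
 * shrinks to cma_bitmap_pages_to_bits(cma, 301) == ALIGN(301, 4) >> 2 == 76.
 */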
static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
			     unsigned int count)
{
	unsigned long bitmap_no, bitmap_count;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	mutex_unlock(&cma->lock);
}
static int __init cma_activate_area(struct cma *cma)
{
	int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;

	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!cma->bitmap)
		return -ENOMEM;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;

		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			/*
			 * alloc_contig_range requires the pfn range
			 * specified to be in the same zone. Make this
			 * simple by forcing the entire CMA resv range
			 * to be in the same zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto not_in_zone;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	mutex_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif

	return 0;

not_in_zone:
	pr_err("CMA area %s could not be activated\n", cma->name);
	kfree(cma->bitmap);
	cma->count = 0;
	return -EINVAL;
}
static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = cma_activate_area(&cma_areas[i]);

		if (ret)
			return ret;
	}

	return 0;
}
core_initcall(cma_init_reserved_areas);
/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area.
 * @size: Size of the reserved area (in bytes).
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @name: The name of the area. If this parameter is NULL, the name of
 *        the area will be set to "cmaN", where N is a running counter of
 *        used areas.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates a custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 const char *name,
				 struct cma **res_cma)
{
	struct cma *cma;
	phys_addr_t alignment;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/* ensure minimal alignment required by mm core */
	alignment = PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);

	/* alignment should be aligned with order_per_bit */
	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];
	if (name) {
		cma->name = name;
	} else {
		cma->name = kasprintf(GFP_KERNEL, "cma%d", cma_area_count);
		if (!cma->name)
			return -ENOMEM;
	}
	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += (size / PAGE_SIZE);

	return 0;
}
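
/*
 * Usage sketch (illustrative, not part of the original file): a
 * reserved-memory handler can hand an already memblock-reserved range to
 * CMA.  The wrapper below is hypothetical:
 *
 *	static struct cma *example_cma;
 *
 *	static int __init example_rmem_setup(phys_addr_t base, phys_addr_t size)
 *	{
 *		int err;
 *
 *		err = cma_init_reserved_mem(base, size, 0, "example",
 *					    &example_cma);
 *		if (err)
 *			pr_err("example: cma init failed: %d\n", err);
 *		return err;
 *	}
 *
 * The range must already be reserved in memblock and aligned as checked
 * above, or the call fails with -EINVAL.
 */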
/**
 * cma_declare_contiguous() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be power of 2 or zero.
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: hint about where to place the reserved area
 * @name: The name of the area. See function cma_init_reserved_mem().
 * @res_cma: Pointer to store the created cma region.
 *
 * This function reserves memory from early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows creation of custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base.  If false,
 * reserve in range from @base to @limit.
 */
int __init cma_declare_contiguous(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret = 0;

	/*
	 * We can't use __pa(high_memory) directly, since high_memory
	 * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
	 * complain. Find the boundary by adding one to the last valid
	 * address.
	 */
	highmem_start = __pa(high_memory - 1) + 1;
	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		__func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/*
	 * Sanitise input arguments.
	 * Pages at both ends of the CMA area could otherwise be merged into
	 * adjacent unmovable pageblocks by the buddy allocator, in which
	 * case the area would no longer yield contiguous memory.
	 */
	alignment = max(alignment, (phys_addr_t)PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base the request region must not cross the
	 * low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (base < highmem_start && limit > highmem_start) {
			addr = memblock_alloc_range(size, alignment,
						    highmem_start, limit,
						    MEMBLOCK_NONE);
			limit = highmem_start;
		}

		if (!addr) {
			addr = memblock_alloc_range(size, alignment, base,
						    limit, MEMBLOCK_NONE);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects but this address isn't mapped and accessible.
		 */
		kmemleak_ignore_phys(addr);
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
	if (ret)
		goto free_mem;

	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
	return 0;

free_mem:
	memblock_free(base, size);
err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}
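
/*
 * Usage sketch (illustrative, not part of the original file): arch setup
 * code might reserve a 64 MiB area anywhere below 4 GiB during early boot.
 * The wrapper name is hypothetical:
 *
 *	static struct cma *boot_cma;
 *
 *	void __init example_reserve_cma(void)
 *	{
 *		int err;
 *
 *		err = cma_declare_contiguous(0, SZ_64M, DMA_BIT_MASK(32) + 1,
 *					     0, 0, false, "boot", &boot_cma);
 *		if (err)
 *			pr_warn("boot CMA reservation failed: %d\n", err);
 *	}
 *
 * This must run while memblock is still live, before the page allocator
 * takes over.
 */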
#ifdef CONFIG_CMA_DEBUG
static void cma_debug_show_areas(struct cma *cma)
{
	unsigned long next_zero_bit, next_set_bit;
	unsigned long start = 0;
	unsigned int nr_zero, nr_total = 0;

	mutex_lock(&cma->lock);
	pr_info("number of available pages: ");
	for (;;) {
		next_zero_bit = find_next_zero_bit(cma->bitmap, cma->count, start);
		if (next_zero_bit >= cma->count)
			break;
		next_set_bit = find_next_bit(cma->bitmap, cma->count, next_zero_bit);
		nr_zero = next_set_bit - next_zero_bit;
		pr_cont("%s%u@%lu", nr_total ? "+" : "", nr_zero, next_zero_bit);
		nr_total += nr_zero;
		start = next_zero_bit + nr_zero;
	}
	pr_cont("=> %u free of %lu total pages\n", nr_total, cma->count);
	mutex_unlock(&cma->lock);
}
#else
static inline void cma_debug_show_areas(struct cma *cma) { }
#endif
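
/*
 * Example output (illustrative, assuming order_per_bit == 0 so one bitmap
 * bit covers one page): with free runs of 32 pages at bit 0 and 16 pages
 * at bit 64 in a 1024-page area, the loop above prints:
 *
 *	cma: number of available pages: 32@0+16@64=> 48 free of 1024 total pages
 */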
/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @gfp_mask: GFP mask to use during compaction.
 *
 * This function allocates part of a specific contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
		       gfp_t gfp_mask)
{
	unsigned long mask, offset;
	unsigned long pfn = -1;
	unsigned long start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	struct page *page = NULL;
	int ret = -ENOMEM;

	if (!cma || !cma->count)
		return NULL;

	pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	if (bitmap_count > bitmap_maxno)
		return NULL;

	for (;;) {
		mutex_lock(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
					 gfp_mask);
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = bitmap_no + mask + 1;
	}

	trace_cma_alloc(pfn, page, count, align);

	if (ret && !(gfp_mask & __GFP_NOWARN)) {
		pr_err("%s: alloc failed, req-size: %zu pages, ret: %d\n",
			__func__, count, ret);
		cma_debug_show_areas(cma);
	}

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}
/**
 * cma_release() - release allocated pages
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the contiguous
 * area and true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);
	trace_cma_release(pfn, pages, count);

	return true;
}
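
/*
 * Usage sketch (illustrative, not part of the original file): a driver
 * allocates 256 contiguous pages from its area and frees them when done.
 * my_cma is hypothetical:
 *
 *	struct page *buf;
 *
 *	buf = cma_alloc(my_cma, 256, 0, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *
 *	... use the 256 physically contiguous pages starting at buf ...
 *
 *	if (!cma_release(my_cma, buf, 256))
 *		pr_warn("buffer did not belong to my_cma\n");
 */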
int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = it(&cma_areas[i], data);

		if (ret)
			return ret;
	}

	return 0;
}
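
/*
 * Usage sketch (illustrative, not part of the original file): print every
 * registered area; a non-zero return from the callback stops the walk early.
 *
 *	static int example_show_one(struct cma *cma, void *data)
 *	{
 *		pr_info("%s: %lu pages\n", cma_get_name(cma),
 *			cma_get_size(cma) >> PAGE_SHIFT);
 *		return 0;
 *	}
 *
 *	cma_for_each_area(example_show_one, NULL);
 */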