/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <[email protected]>
 *	Michal Nazarewicz <[email protected]>
 *	Aneesh Kumar K.V <[email protected]>
 *	Joonsoo Kim <[email protected]>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
# define DEBUG
#endif
#endif
#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <trace/events/cma.h>

#include "cma.h"

struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(const struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

const char *cma_get_name(const struct cma *cma)
{
	return cma->name ? cma->name : "(undefined)";
}

static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     unsigned int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}

/*
 * Find the offset of the base PFN from the specified align_order.
 * The value returned is represented in order_per_bits.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       unsigned int align_order)
{
	return (cma->base_pfn & ((1UL << align_order) - 1))
		>> cma->order_per_bit;
}

static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}
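
/*
 * Worked example (illustrative only, not used by the code): with
 * order_per_bit = 0, every bitmap bit covers one page. For an area at
 * base_pfn = 0x10100 and an allocation with align_order = 8:
 *
 *	mask   = (1UL << (8 - 0)) - 1     = 255
 *	offset = (0x10100 & 0xff) >> 0    = 0
 *
 * With order_per_bit = 4, one bit covers 16 pages, so the same request
 * yields mask = (1UL << (8 - 4)) - 1 = 15, and a 16-page request
 * consumes cma_bitmap_pages_to_bits() = ALIGN(16, 16) >> 4 = 1 bit.
 */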

static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
			     unsigned int count)
{
	unsigned long bitmap_no, bitmap_count;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	mutex_unlock(&cma->lock);
}

static int __init cma_activate_area(struct cma *cma)
{
	int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;

	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);

	if (!cma->bitmap)
		return -ENOMEM;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;

		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			/*
			 * alloc_contig_range requires the pfn range
			 * specified to be in the same zone. Make this
			 * simple by forcing the entire CMA resv range
			 * to be in the same zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto not_in_zone;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	mutex_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif

	return 0;

not_in_zone:
	pr_err("CMA area %s could not be activated\n", cma->name);
	kfree(cma->bitmap);
	cma->count = 0;
	return -EINVAL;
}

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = cma_activate_area(&cma_areas[i]);

		if (ret)
			return ret;
	}

	return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area
 * @size: Size of the reserved area (in bytes)
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @name: The name of the area. If this parameter is NULL, the name of
 *	  the area will be set to "cmaN", where N is a running counter of
 *	  used areas.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates a custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 const char *name,
				 struct cma **res_cma)
{
	struct cma *cma;
	phys_addr_t alignment;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/* ensure minimal alignment required by mm core */
	alignment = PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);

	/* alignment should be aligned with order_per_bit */
	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];
	if (name) {
		cma->name = name;
	} else {
		/* No trailing newline: the name is used as an identifier. */
		cma->name = kasprintf(GFP_KERNEL, "cma%d", cma_area_count);
		if (!cma->name)
			return -ENOMEM;
	}
	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += (size / PAGE_SIZE);

	return 0;
}
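
/*
 * Usage sketch (hypothetical, not part of this file): early-boot code
 * that has already reserved a suitably aligned block via memblock can
 * hand it over to CMA. The base address, size, and function name below
 * are illustrative assumptions only.
 */
#if 0
static int __init example_rmem_setup(void)
{
	phys_addr_t base = 0x80000000;	/* assumed already memblock-reserved */
	phys_addr_t size = SZ_64M;
	struct cma *cma;
	int ret;

	ret = cma_init_reserved_mem(base, size, 0, "example", &cma);
	if (ret)
		pr_err("example: CMA init failed: %d\n", ret);
	return ret;
}
#endif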

/**
 * cma_declare_contiguous() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any)
 * @size: Size of the reserved area (in bytes)
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be a power of 2 or zero
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: hint about where to place the reserved area
 * @name: The name of the area. See function cma_init_reserved_mem()
 * @res_cma: Pointer to store the created cma region.
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows creation of custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base. If false,
 * reserve in range from @base to @limit.
 */
int __init cma_declare_contiguous(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret = 0;

	/*
	 * We can't use __pa(high_memory) directly, since high_memory
	 * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
	 * complain. Find the boundary by adding one to the last valid
	 * address.
	 */
	highmem_start = __pa(high_memory - 1) + 1;
	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		__func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/*
	 * Sanitise input arguments.
	 * Pages at both ends of the CMA area could be merged into adjacent
	 * unmovable-migratetype pages by the page allocator's buddy
	 * algorithm; in that case a later contiguous allocation could fail,
	 * which is not what we want.
	 */
	alignment = max(alignment,  (phys_addr_t)PAGE_SIZE <<
			  max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base the request region must not cross the
	 * low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (base < highmem_start && limit > highmem_start) {
			addr = memblock_alloc_range(size, alignment,
						    highmem_start, limit,
						    MEMBLOCK_NONE);
			limit = highmem_start;
		}

		if (!addr) {
			addr = memblock_alloc_range(size, alignment, base,
						    limit,
						    MEMBLOCK_NONE);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects but this address isn't mapped and accessible
		 */
		kmemleak_ignore_phys(addr);
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
	if (ret)
		goto err;

	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
	return 0;

err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}
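
/*
 * Usage sketch (hypothetical, not part of this file): arch setup code
 * typically declares an area early in boot, before other subsystems
 * start allocating. The size, limit handling, and names below are
 * illustrative assumptions only.
 */
#if 0
static struct cma *example_cma;

void __init example_reserve(phys_addr_t limit)
{
	/* 16 MiB anywhere below @limit; fixed=false lets CMA pick the base */
	if (cma_declare_contiguous(0, SZ_16M, limit, 0, 0, false,
				   "example", &example_cma))
		pr_warn("example: CMA reservation failed\n");
}
#endif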

#ifdef CONFIG_CMA_DEBUG
static void cma_debug_show_areas(struct cma *cma)
{
	unsigned long next_zero_bit, next_set_bit;
	unsigned long start = 0;
	unsigned int nr_zero, nr_total = 0;

	mutex_lock(&cma->lock);
	pr_info("number of available pages: ");
	for (;;) {
		next_zero_bit = find_next_zero_bit(cma->bitmap, cma->count, start);
		if (next_zero_bit >= cma->count)
			break;
		next_set_bit = find_next_bit(cma->bitmap, cma->count, next_zero_bit);
		nr_zero = next_set_bit - next_zero_bit;
		pr_cont("%s%u@%lu", nr_total ? "+" : "", nr_zero, next_zero_bit);
		nr_total += nr_zero;
		start = next_zero_bit + nr_zero;
	}
	pr_cont("=> %u free of %lu total pages\n", nr_total, cma->count);
	mutex_unlock(&cma->lock);
}
#else
static inline void cma_debug_show_areas(struct cma *cma) { }
#endif

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma: Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @gfp_mask: GFP mask to use during compaction
 *
 * This function allocates pages from a specific contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
		       gfp_t gfp_mask)
{
	unsigned long mask, offset;
	unsigned long pfn = -1;
	unsigned long start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	struct page *page = NULL;
	int ret = -ENOMEM;

	if (!cma || !cma->count)
		return NULL;

	pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	if (bitmap_count > bitmap_maxno)
		return NULL;

	for (;;) {
		mutex_lock(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
					 gfp_mask);
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = bitmap_no + mask + 1;
	}

	trace_cma_alloc(pfn, page, count, align);

	if (ret && !(gfp_mask & __GFP_NOWARN)) {
		pr_err("%s: alloc failed, req-size: %zu pages, ret: %d\n",
		       __func__, count, ret);
		cma_debug_show_areas(cma);
	}

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}

/**
 * cma_release() - release allocated pages
 * @cma: Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the
 * contiguous area and true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);
	trace_cma_release(pfn, pages, count);

	return true;
}
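
/*
 * Usage sketch (hypothetical, not part of this file): a driver grabs a
 * 1 MiB physically contiguous buffer and later returns it. "example_cma"
 * is assumed to have been set up earlier via cma_declare_contiguous()
 * or cma_init_reserved_mem(); the function name is an assumption too.
 */
#if 0
static int example_use_buffer(struct cma *example_cma)
{
	unsigned int count = SZ_1M >> PAGE_SHIFT;
	struct page *page;

	/* align = 0: no alignment beyond a single page required */
	page = cma_alloc(example_cma, count, 0, GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	/* ... use page_address(page), or map the pages for DMA ... */

	cma_release(example_cma, page, count);
	return 0;
}
#endif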

int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = it(&cma_areas[i], data);

		if (ret)
			return ret;
	}

	return 0;
}
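
/*
 * Usage sketch (hypothetical, not part of this file): the iterator stops
 * and propagates the first non-zero return value from the callback.
 * The function names below are illustrative assumptions.
 */
#if 0
static int example_print_area(struct cma *cma, void *data)
{
	phys_addr_t base = cma_get_base(cma);

	pr_info("%s: base %pa, size %lu bytes\n", cma_get_name(cma),
		&base, cma_get_size(cma));
	return 0;	/* 0 => keep iterating */
}

static void example_dump_areas(void)
{
	cma_for_each_area(example_print_area, NULL);
}
#endif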