/*
 * Basic general purpose allocator for managing special purpose
 * memory, for example, memory that is not managed by the regular
 * kmalloc/kfree interface.  Uses for this include on-device special
 * memory, uncached memory etc.
 *
 * It is safe to use the allocator in NMI handlers and other special
 * unblockable contexts that could otherwise deadlock on locks.  This
 * is implemented by using atomic operations and retries on any
 * conflicts.  The disadvantage is that there may be livelocks in
 * extreme cases.  For better scalability, one allocator can be used
 * for each CPU.
 *
 * The lockless operation only works if there is enough memory
 * available.  If new memory is added to the pool, a lock still has
 * to be taken.  So any user relying on locklessness has to ensure
 * that sufficient memory is preallocated.
 *
 * The basic atomic operation of this allocator is cmpxchg on long.
 * On architectures that don't have an NMI-safe cmpxchg implementation,
 * the allocator can NOT be used in NMI handlers.  So code that uses
 * the allocator in an NMI handler should depend on
 * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
 *
 * Copyright 2005 (C) Jes Sorensen <[email protected]>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */
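
/*
 * Typical lifecycle, as an illustrative sketch only (the identifiers
 * sram_virt, sram_phys and SRAM_SIZE below are hypothetical).  The pool
 * is seeded with one chunk up front, which satisfies the preallocation
 * requirement described above, so later allocations stay lockless:
 *
 *	struct gen_pool *pool;
 *	unsigned long buf;
 *
 *	pool = gen_pool_create(5, -1);	// 5 -> 32-byte allocation units
 *	if (!pool)
 *		return -ENOMEM;
 *	if (gen_pool_add_virt(pool, sram_virt, sram_phys, SRAM_SIZE, -1)) {
 *		gen_pool_destroy(pool);
 *		return -ENOMEM;
 *	}
 *	buf = gen_pool_alloc(pool, 256);	// may be called locklessly
 *	...
 *	gen_pool_free(pool, buf, 256);
 */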

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/genalloc.h>
#include <linux/of_device.h>

static inline size_t chunk_size(const struct gen_pool_chunk *chunk)
{
	return chunk->end_addr - chunk->start_addr + 1;
}

/*
 * Atomically set @mask_to_set in *@addr, failing with -EBUSY if any of
 * those bits is already set (i.e. another user raced us to it).
 */
static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
{
	unsigned long val, nval;

	nval = *addr;
	do {
		val = nval;
		if (val & mask_to_set)
			return -EBUSY;
		cpu_relax();
	} while ((nval = cmpxchg(addr, val, val | mask_to_set)) != val);

	return 0;
}

/*
 * Atomically clear @mask_to_clear in *@addr, failing with -EBUSY unless
 * all of those bits are currently set.
 */
static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
{
	unsigned long val, nval;

	nval = *addr;
	do {
		val = nval;
		if ((val & mask_to_clear) != mask_to_clear)
			return -EBUSY;
		cpu_relax();
	} while ((nval = cmpxchg(addr, val, val & ~mask_to_clear)) != val);

	return 0;
}

/*
 * bitmap_set_ll - set the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to set
 *
 * Set @nr bits starting from @start in @map locklessly.  Several users
 * can set/clear the same bitmap simultaneously without a lock.  If two
 * users set the same bit, one user will return the number of remaining
 * bits, otherwise zero is returned.
 */
static int bitmap_set_ll(unsigned long *map, int start, int nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const int size = start + nr;
	int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);

	while (nr - bits_to_set >= 0) {
		if (set_bits_ll(p, mask_to_set))
			return nr;
		nr -= bits_to_set;
		bits_to_set = BITS_PER_LONG;
		mask_to_set = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_set &= BITMAP_LAST_WORD_MASK(size);
		if (set_bits_ll(p, mask_to_set))
			return nr;
	}

	return 0;
}
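
/*
 * As a worked example (assuming BITS_PER_LONG == 64): bitmap_set_ll(map,
 * 60, 10) first sets bits 60..63 of word 0 via BITMAP_FIRST_WORD_MASK(60),
 * leaving nr == 6, then sets bits 0..5 of word 1 via
 * BITMAP_LAST_WORD_MASK(70).  If the second step loses a race, 6 is
 * returned so the caller can undo the 4 bits that were already set.
 */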

/*
 * bitmap_clear_ll - clear the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to clear
 *
 * Clear @nr bits starting from @start in @map locklessly.  Several users
 * can set/clear the same bitmap simultaneously without a lock.  If two
 * users clear the same bit, one user will return the number of remaining
 * bits, otherwise zero is returned.
 */
static int bitmap_clear_ll(unsigned long *map, int start, int nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const int size = start + nr;
	int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);

	while (nr - bits_to_clear >= 0) {
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
		nr -= bits_to_clear;
		bits_to_clear = BITS_PER_LONG;
		mask_to_clear = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
	}

	return 0;
}

/**
 * gen_pool_create - create a new special memory pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.
 */
struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
	struct gen_pool *pool;

	pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
	if (pool != NULL) {
		spin_lock_init(&pool->lock);
		INIT_LIST_HEAD(&pool->chunks);
		pool->min_alloc_order = min_alloc_order;
		pool->algo = gen_pool_first_fit;
		pool->data = NULL;
		pool->name = NULL;
	}
	return pool;
}
EXPORT_SYMBOL(gen_pool_create);

/**
 * gen_pool_add_virt - add a new chunk of special memory to the pool
 * @pool: pool to add new memory chunk to
 * @virt: virtual starting address of memory chunk to add to pool
 * @phys: physical starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 *
 * Add a new chunk of special memory to the specified pool.
 *
 * Returns 0 on success or a -ve errno on failure.
 */
int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
		 size_t size, int nid)
{
	struct gen_pool_chunk *chunk;
	int nbits = size >> pool->min_alloc_order;
	int nbytes = sizeof(struct gen_pool_chunk) +
				BITS_TO_LONGS(nbits) * sizeof(long);

	chunk = kzalloc_node(nbytes, GFP_KERNEL, nid);
	if (unlikely(chunk == NULL))
		return -ENOMEM;

	chunk->phys_addr = phys;
	chunk->start_addr = virt;
	chunk->end_addr = virt + size - 1;
	atomic_set(&chunk->avail, size);

	spin_lock(&pool->lock);
	list_add_rcu(&chunk->next_chunk, &pool->chunks);
	spin_unlock(&pool->lock);

	return 0;
}
EXPORT_SYMBOL(gen_pool_add_virt);
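
/*
 * Note: callers that have no meaningful physical address for a chunk
 * typically use the gen_pool_add() wrapper from <linux/genalloc.h>,
 * which calls gen_pool_add_virt() with @phys == -1.
 */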

/**
 * gen_pool_virt_to_phys - return the physical address of memory
 * @pool: pool to allocate from
 * @addr: starting address of memory
 *
 * Returns the physical address on success, or -1 on error.
 */
phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
{
	struct gen_pool_chunk *chunk;
	phys_addr_t paddr = -1;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
			paddr = chunk->phys_addr + (addr - chunk->start_addr);
			break;
		}
	}
	rcu_read_unlock();

	return paddr;
}
EXPORT_SYMBOL(gen_pool_virt_to_phys);

/**
 * gen_pool_destroy - destroy a special memory pool
 * @pool: pool to destroy
 *
 * Destroy the specified special memory pool.  Verifies that there are no
 * outstanding allocations.
 */
void gen_pool_destroy(struct gen_pool *pool)
{
	struct list_head *_chunk, *_next_chunk;
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	int bit, end_bit;

	list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
		list_del(&chunk->next_chunk);

		end_bit = chunk_size(chunk) >> order;
		bit = find_next_bit(chunk->bits, end_bit, 0);
		BUG_ON(bit < end_bit);

		kfree(chunk);
	}
	kfree_const(pool->name);
	kfree(pool);
}
EXPORT_SYMBOL(gen_pool_destroy);

/**
 * gen_pool_alloc - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Can not be used in NMI handler on architectures without
 * NMI-safe cmpxchg implementation.
 */
unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
{
	struct gen_pool_chunk *chunk;
	unsigned long addr = 0;
	int order = pool->min_alloc_order;
	int nbits, start_bit = 0, end_bit, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	if (size == 0)
		return 0;

	/* round the request up to a whole number of allocation units */
	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (size > atomic_read(&chunk->avail))
			continue;

		end_bit = chunk_size(chunk) >> order;
retry:
		start_bit = pool->algo(chunk->bits, end_bit, start_bit, nbits,
				pool->data);
		if (start_bit >= end_bit)
			continue;
		remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
		if (remain) {
			/*
			 * Lost a race: undo the bits we did manage to set
			 * and search again from the same position.
			 */
			remain = bitmap_clear_ll(chunk->bits, start_bit,
						 nbits - remain);
			BUG_ON(remain);
			goto retry;
		}

		addr = chunk->start_addr + ((unsigned long)start_bit << order);
		size = nbits << order;
		atomic_sub(size, &chunk->avail);
		break;
	}
	rcu_read_unlock();
	return addr;
}
EXPORT_SYMBOL(gen_pool_alloc);
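
/*
 * For example, with min_alloc_order == 5 (32-byte units) a call of
 * gen_pool_alloc(pool, 70) rounds up to nbits == 3 and consumes 96
 * bytes of the pool; the matching gen_pool_free() must pass the same
 * size so that the same number of bits is cleared.
 */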

/**
 * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: dma-view physical address return value.  Use NULL if unneeded.
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Can not be used in NMI handler on architectures without
 * NMI-safe cmpxchg implementation.
 */
void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
{
	unsigned long vaddr;

	if (!pool)
		return NULL;

	vaddr = gen_pool_alloc(pool, size);
	if (!vaddr)
		return NULL;

	if (dma)
		*dma = gen_pool_virt_to_phys(pool, vaddr);

	return (void *)vaddr;
}
EXPORT_SYMBOL(gen_pool_dma_alloc);
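
/*
 * Illustrative use (a sketch only; "desc" and its size are hypothetical):
 *
 *	dma_addr_t dma;
 *	struct desc *d = gen_pool_dma_alloc(pool, sizeof(*d), &dma);
 *
 * "d" is then the CPU view of the buffer and "dma" the address to hand
 * to the device.
 */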

/**
 * gen_pool_free - free allocated special memory back to the pool
 * @pool: pool to free to
 * @addr: starting address of memory to free back to pool
 * @size: size in bytes of memory to free
 *
 * Free previously allocated special memory back to the specified
 * pool.  Can not be used in NMI handler on architectures without
 * NMI-safe cmpxchg implementation.
 */
void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
{
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	int start_bit, nbits, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
			BUG_ON(addr + size - 1 > chunk->end_addr);
			start_bit = (addr - chunk->start_addr) >> order;
			remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
			BUG_ON(remain);
			size = nbits << order;
			atomic_add(size, &chunk->avail);
			rcu_read_unlock();
			return;
		}
	}
	rcu_read_unlock();
	BUG();
}
EXPORT_SYMBOL(gen_pool_free);

/**
 * gen_pool_for_each_chunk - call func for every chunk of generic memory pool
 * @pool: the generic memory pool
 * @func: func to call
 * @data: additional data used by @func
 *
 * Call @func for every chunk of generic memory pool.  The @func is
 * called with rcu_read_lock held.
 */
void gen_pool_for_each_chunk(struct gen_pool *pool,
	void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data),
	void *data)
{
	struct gen_pool_chunk *chunk;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk)
		func(pool, chunk, data);
	rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_for_each_chunk);
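
/*
 * A minimal callback sketch (count_chunks is hypothetical); note that it
 * runs under rcu_read_lock and therefore must not sleep:
 *
 *	static void count_chunks(struct gen_pool *pool,
 *				 struct gen_pool_chunk *chunk, void *data)
 *	{
 *		(*(unsigned int *)data)++;
 *	}
 *
 *	unsigned int n = 0;
 *	gen_pool_for_each_chunk(pool, count_chunks, &n);
 */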

/**
 * addr_in_gen_pool - checks if an address falls within the range of a pool
 * @pool: the generic memory pool
 * @start: start address
 * @size: size of the region
 *
 * Check if the range of addresses falls within the specified pool.  Returns
 * true if the entire range is contained in the pool and false otherwise.
 */
bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
			size_t size)
{
	bool found = false;
	unsigned long end = start + size - 1;
	struct gen_pool_chunk *chunk;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) {
		if (start >= chunk->start_addr && start <= chunk->end_addr) {
			if (end <= chunk->end_addr) {
				found = true;
				break;
			}
		}
	}
	rcu_read_unlock();
	return found;
}

/**
 * gen_pool_avail - get available free space of the pool
 * @pool: pool to get available free space
 *
 * Return available free space of the specified pool.
 */
size_t gen_pool_avail(struct gen_pool *pool)
{
	struct gen_pool_chunk *chunk;
	size_t avail = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		avail += atomic_read(&chunk->avail);
	rcu_read_unlock();
	return avail;
}
EXPORT_SYMBOL_GPL(gen_pool_avail);

/**
 * gen_pool_size - get size in bytes of memory managed by the pool
 * @pool: pool to get size
 *
 * Return size in bytes of memory managed by the pool.
 */
size_t gen_pool_size(struct gen_pool *pool)
{
	struct gen_pool_chunk *chunk;
	size_t size = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		size += chunk_size(chunk);
	rcu_read_unlock();
	return size;
}
EXPORT_SYMBOL_GPL(gen_pool_size);

/**
 * gen_pool_set_algo - set the allocation algorithm
 * @pool: pool to change allocation algorithm
 * @algo: custom algorithm function
 * @data: additional data used by @algo
 *
 * Call @algo for each memory allocation in the pool.
 * If @algo is NULL use gen_pool_first_fit as default
 * memory allocation function.
 */
void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data)
{
	rcu_read_lock();

	pool->algo = algo;
	if (!pool->algo)
		pool->algo = gen_pool_first_fit;

	pool->data = data;

	rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_set_algo);
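
/*
 * For example, a pool that should prefer the smallest suitable hole over
 * the first one found can be switched to best-fit:
 *
 *	gen_pool_set_algo(pool, gen_pool_best_fit, NULL);
 */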

/**
 * gen_pool_first_fit - find the first available region
 * of memory matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 */
unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data)
{
	return bitmap_find_next_zero_area(map, size, start, nr, 0);
}
EXPORT_SYMBOL(gen_pool_first_fit);

/**
 * gen_pool_first_fit_order_align - find the first available region
 * of memory matching the size requirement.  The region will be aligned
 * to the order of the size specified.
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 */
unsigned long gen_pool_first_fit_order_align(unsigned long *map,
		unsigned long size, unsigned long start,
		unsigned int nr, void *data)
{
	unsigned long align_mask = roundup_pow_of_two(nr) - 1;

	return bitmap_find_next_zero_area(map, size, start, nr, align_mask);
}
EXPORT_SYMBOL(gen_pool_first_fit_order_align);
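
/*
 * For instance, a request for nr == 48 bits yields align_mask == 63, so
 * the returned region starts on a 64-bit (i.e. 64-allocation-unit)
 * boundary within the bitmap.
 */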

/**
 * gen_pool_best_fit - find the best fitting region of memory
 * matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 *
 * Iterate over the bitmap to find the smallest free region
 * in which we can allocate the memory.
 */
unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data)
{
	unsigned long start_bit = size;
	unsigned long len = size + 1;
	unsigned long index;

	index = bitmap_find_next_zero_area(map, size, start, nr, 0);

	while (index < size) {
		int next_bit = find_next_bit(map, size, index + nr);
		if ((next_bit - index) < len) {
			/* a tighter fit than anything seen so far */
			len = next_bit - index;
			start_bit = index;
			if (len == nr)
				return start_bit;	/* exact fit */
		}
		index = bitmap_find_next_zero_area(map, size,
						   next_bit + 1, nr, 0);
	}

	return start_bit;
}
EXPORT_SYMBOL(gen_pool_best_fit);

static void devm_gen_pool_release(struct device *dev, void *res)
{
	gen_pool_destroy(*(struct gen_pool **)res);
}

static int devm_gen_pool_match(struct device *dev, void *res, void *data)
{
	struct gen_pool **p = res;

	/* NULL data matches only a pool without an assigned name */
	if (!data && !(*p)->name)
		return 1;

	if (!data || !(*p)->name)
		return 0;

	return !strcmp((*p)->name, data);
}

/**
 * gen_pool_get - Obtain the gen_pool (if any) for a device
 * @dev: device to retrieve the gen_pool from
 * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
 *
 * Returns the gen_pool for the device if one is present, or NULL.
 */
struct gen_pool *gen_pool_get(struct device *dev, const char *name)
{
	struct gen_pool **p;

	p = devres_find(dev, devm_gen_pool_release, devm_gen_pool_match,
			(void *)name);
	if (!p)
		return NULL;
	return *p;
}
EXPORT_SYMBOL_GPL(gen_pool_get);

/**
 * devm_gen_pool_create - managed gen_pool_create
 * @dev: device that provides the gen_pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node selector for allocated gen_pool, %NUMA_NO_NODE for all nodes
 * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.  The pool will be
 * automatically destroyed by the device management code.
 */
struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
				      int nid, const char *name)
{
	struct gen_pool **ptr, *pool;
	const char *pool_name = NULL;

	/* Check that genpool to be created is uniquely addressed on device */
	if (gen_pool_get(dev, name))
		return ERR_PTR(-EINVAL);

	if (name) {
		pool_name = kstrdup_const(name, GFP_KERNEL);
		if (!pool_name)
			return ERR_PTR(-ENOMEM);
	}

	ptr = devres_alloc(devm_gen_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		goto free_pool_name;

	pool = gen_pool_create(min_alloc_order, nid);
	if (!pool)
		goto free_devres;

	*ptr = pool;
	pool->name = pool_name;
	devres_add(dev, ptr);

	return pool;

free_devres:
	devres_free(ptr);
free_pool_name:
	kfree_const(pool_name);

	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(devm_gen_pool_create);
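
/*
 * A typical probe-time sketch (the granularity and name are arbitrary);
 * no explicit gen_pool_destroy() is needed, devres handles it on unbind:
 *
 *	pool = devm_gen_pool_create(dev, ilog2(64), NUMA_NO_NODE, "sram");
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 */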

#ifdef CONFIG_OF
/**
 * of_gen_pool_get - find a pool by phandle property
 * @np: device node
 * @propname: property name containing phandle(s)
 * @index: index into the phandle array
 *
 * Returns the pool that contains the chunk starting at the physical
 * address of the device tree node pointed at by the phandle property,
 * or NULL if not found.
 */
struct gen_pool *of_gen_pool_get(struct device_node *np,
	const char *propname, int index)
{
	struct platform_device *pdev;
	struct device_node *np_pool, *parent;
	const char *name = NULL;
	struct gen_pool *pool = NULL;

	np_pool = of_parse_phandle(np, propname, index);
	if (!np_pool)
		return NULL;

	pdev = of_find_device_by_node(np_pool);
	if (!pdev) {
		/* Check if named gen_pool is created by parent node device */
		parent = of_get_parent(np_pool);
		pdev = of_find_device_by_node(parent);
		of_node_put(parent);

		of_property_read_string(np_pool, "label", &name);
		if (!name)
			name = np_pool->name;
	}
	if (pdev)
		pool = gen_pool_get(&pdev->dev, name);
	of_node_put(np_pool);

	return pool;
}
EXPORT_SYMBOL_GPL(of_gen_pool_get);
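
/*
 * Illustrative device-tree shape this helper can resolve (all node and
 * property names here are hypothetical except "label"):
 *
 *	sram_pool: sram@10000 {
 *		label = "sram";
 *	};
 *
 *	client@20000 {
 *		sram = <&sram_pool>;
 *	};
 *
 * The client driver would then look the pool up with:
 *
 *	pool = of_gen_pool_get(np, "sram", 0);
 */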
#endif /* CONFIG_OF */