// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009		SUSE Linux Products GmbH
 * Copyright (C) 2009		Tejun Heo <[email protected]>
 *
 * Copyright (C) 2017		Facebook Inc.
 * Copyright (C) 2017		Dennis Zhou <[email protected]>
 *
 * The percpu allocator handles both static and dynamic areas.  Percpu
 * areas are allocated in chunks which are divided into units.  There is
 * a 1-to-1 mapping for units to possible cpus.  These units are grouped
 * based on NUMA properties of the machine.
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done by offsets into a unit's address space.  I.e., an
 * area of 512 bytes at 6k in c1 occupies 512 bytes at 6k in c1:u0,
 * c1:u1, c1:u2, etc.  On NUMA machines, the mapping may be non-linear
 * and even sparse.  Access is handled by configuring percpu base
 * registers according to the cpu to unit mappings and offsetting the
 * base address using pcpu_unit_size.
 *
 * There is special consideration for the first chunk which must handle
 * the static percpu variables in the kernel image as allocation services
 * are not online yet.  In short, the first chunk is structured like so:
 *
 *                  <Static | [Reserved] | Dynamic>
 *
 * The static data is copied from the original section managed by the
 * linker.  The reserved section, if non-zero, primarily manages static
 * percpu variables from kernel modules.  Finally, the dynamic section
 * takes care of normal allocations.
 *
 * The allocator organizes chunks into lists according to free size and
 * memcg-awareness.  To make a percpu allocation memcg-aware, the
 * __GFP_ACCOUNT flag should be passed.  All memcg-aware allocations share
 * one set of chunks, while all unaccounted allocations and allocations
 * performed by processes belonging to the root memory cgroup use the
 * second set.
 *
 * The allocator tries to allocate from the fullest chunk first.  Each chunk
 * is managed by a bitmap with metadata blocks.  The allocation map is updated
 * on every allocation and free to reflect the current state while the boundary
 * map is only updated on allocation.  Each metadata block contains
 * information to help mitigate the need to iterate over large portions
 * of the bitmap.  The reverse mapping from page to chunk is stored in
 * the page's index.  Lastly, units are lazily backed and grow in unison.
 *
 * There is a unique conversion that goes on here between bytes and bits.
 * Each bit represents a fragment of size PCPU_MIN_ALLOC_SIZE.  The chunk
 * tracks the number of pages it is responsible for in nr_pages.  Helper
 * functions are used to convert between bytes, bits, and blocks.  All
 * hints are managed in bits unless explicitly stated.
 *
 * To use this allocator, arch code should do the following:
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   regular address to percpu pointer and back if they need to be
 *   different from the default
 *
 * - use pcpu_setup_first_chunk() during percpu area initialization to
 *   set up the first chunk containing the kernel static percpu area
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/lcm.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/kmemleak.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/memcontrol.h>

#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#define CREATE_TRACE_POINTS
#include <trace/events/percpu.h>

#include "percpu-internal.h"

/* the slots are sorted by free bytes left, 1-31 bytes share the same slot */
#define PCPU_SLOT_BASE_SHIFT		5
/* chunks in slots below this are subject to being sidelined on failed alloc */
#define PCPU_SLOT_FAIL_THRESHOLD	3

#define PCPU_EMPTY_POP_PAGES_LOW	2
#define PCPU_EMPTY_POP_PAGES_HIGH	4

#ifdef CONFIG_SMP
/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
#ifndef __addr_to_pcpu_ptr
#define __addr_to_pcpu_ptr(addr)					\
	(void __percpu *)((unsigned long)(addr) -			\
			  (unsigned long)pcpu_base_addr +		\
			  (unsigned long)__per_cpu_start)
#endif
#ifndef __pcpu_ptr_to_addr
#define __pcpu_ptr_to_addr(ptr)						\
	(void __force *)((unsigned long)(ptr) +			\
			 (unsigned long)pcpu_base_addr -		\
			 (unsigned long)__per_cpu_start)
#endif
#else	/* CONFIG_SMP */
/* on UP, it's always identity mapped */
#define __addr_to_pcpu_ptr(addr)	(void __percpu *)(addr)
#define __pcpu_ptr_to_addr(ptr)		(void __force *)(ptr)
#endif	/* CONFIG_SMP */

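/*
 * For example, under the default SMP mapping above, a percpu pointer is
 * an address-sized cookie relative to __per_cpu_start:
 *
 *	void __percpu *p = __addr_to_pcpu_ptr(addr);
 *	void *addr2 = __pcpu_ptr_to_addr(p);	// addr2 == addr
 *
 * i.e. the two macros are inverses; per-cpu copies of an object are then
 * found by adding a cpu's unit offset to the translated address.  Sketch
 * of the default case only; architectures may override both macros.
 */
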
static int pcpu_unit_pages __ro_after_init;
static int pcpu_unit_size __ro_after_init;
static int pcpu_nr_units __ro_after_init;
static int pcpu_atom_size __ro_after_init;
int pcpu_nr_slots __ro_after_init;
static size_t pcpu_chunk_struct_size __ro_after_init;

/* cpus with the lowest and highest unit addresses */
static unsigned int pcpu_low_unit_cpu __ro_after_init;
static unsigned int pcpu_high_unit_cpu __ro_after_init;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __ro_after_init;
EXPORT_SYMBOL_GPL(pcpu_base_addr);

static const int *pcpu_unit_map __ro_after_init;		/* cpu -> unit */
const unsigned long *pcpu_unit_offsets __ro_after_init;	/* cpu -> unit offset */

/* group information, used for vm allocation */
static int pcpu_nr_groups __ro_after_init;
static const unsigned long *pcpu_group_offsets __ro_after_init;
static const size_t *pcpu_group_sizes __ro_after_init;

/*
 * The first chunk which always exists.  Note that unlike other
 * chunks, this one can be allocated and mapped in several different
 * ways and thus often doesn't live in the vmalloc area.
 */
struct pcpu_chunk *pcpu_first_chunk __ro_after_init;

/*
 * Optional reserved chunk.  This chunk reserves part of the first
 * chunk and serves it for reserved allocations.  When the reserved
 * region doesn't exist, the following variable is NULL.
 */
struct pcpu_chunk *pcpu_reserved_chunk __ro_after_init;

DEFINE_SPINLOCK(pcpu_lock);	/* all internal data structures */
static DEFINE_MUTEX(pcpu_alloc_mutex);	/* chunk create/destroy, [de]pop, map ext */

struct list_head *pcpu_chunk_lists __ro_after_init; /* chunk list slots */

/* chunks which need their map areas extended, protected by pcpu_lock */
static LIST_HEAD(pcpu_map_extend_chunks);

/*
 * The number of empty populated pages, protected by pcpu_lock.  The
 * reserved chunk doesn't contribute to the count.
 */
int pcpu_nr_empty_pop_pages;

/*
 * The number of populated pages in use by the allocator, protected by
 * pcpu_lock.  This number is kept per unit per chunk (i.e. when a page gets
 * allocated/deallocated, it is allocated/deallocated in all units of a chunk
 * and increments/decrements this count by 1).
 */
static unsigned long pcpu_nr_populated;

/*
 * Balance work is used to populate or destroy chunks asynchronously.  We
 * try to keep the number of populated free pages between
 * PCPU_EMPTY_POP_PAGES_LOW and HIGH for atomic allocations and at most one
 * empty chunk.
 */
static void pcpu_balance_workfn(struct work_struct *work);
static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn);
static bool pcpu_async_enabled __read_mostly;
static bool pcpu_atomic_alloc_failed;

static void pcpu_schedule_balance_work(void)
{
	if (pcpu_async_enabled)
		schedule_work(&pcpu_balance_work);
}

/**
 * pcpu_addr_in_chunk - check if the address is served from this chunk
 * @chunk: chunk of interest
 * @addr: percpu address
 *
 * RETURNS:
 * True if the address is served from this chunk.
 */
static bool pcpu_addr_in_chunk(struct pcpu_chunk *chunk, void *addr)
{
	void *start_addr, *end_addr;

	if (!chunk)
		return false;

	start_addr = chunk->base_addr + chunk->start_offset;
	end_addr = chunk->base_addr + chunk->nr_pages * PAGE_SIZE -
		   chunk->end_offset;

	return addr >= start_addr && addr < end_addr;
}

static int __pcpu_size_to_slot(int size)
{
	int highbit = fls(size);	/* size is in bytes */
	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}

static int pcpu_size_to_slot(int size)
{
	if (size == pcpu_unit_size)
		return pcpu_nr_slots - 1;
	return __pcpu_size_to_slot(size);
}

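/*
 * E.g., a chunk whose largest contiguous free area is 1024 bytes has
 * fls(1024) == 11 and so sits in slot max(11 - 5 + 2, 1) == 8 with the
 * PCPU_SLOT_BASE_SHIFT of 5 above, while a chunk with its whole unit
 * free is always kept in the last slot.  (Worked example for the
 * defines above.)
 */
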
static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
	const struct pcpu_block_md *chunk_md = &chunk->chunk_md;

	if (chunk->free_bytes < PCPU_MIN_ALLOC_SIZE ||
	    chunk_md->contig_hint == 0)
		return 0;

	return pcpu_size_to_slot(chunk_md->contig_hint * PCPU_MIN_ALLOC_SIZE);
}

/* set the pointer to a chunk in a page struct */
static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
{
	page->index = (unsigned long)pcpu;
}

/* obtain pointer to a chunk from a page struct */
static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
{
	return (struct pcpu_chunk *)page->index;
}

static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
{
	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
}

static unsigned long pcpu_unit_page_offset(unsigned int cpu, int page_idx)
{
	return pcpu_unit_offsets[cpu] + (page_idx << PAGE_SHIFT);
}

static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
				     unsigned int cpu, int page_idx)
{
	return (unsigned long)chunk->base_addr +
	       pcpu_unit_page_offset(cpu, page_idx);
}

/*
 * The following are helper functions to help access bitmaps and convert
 * between bitmap offsets and address offsets.
 */
static unsigned long *pcpu_index_alloc_map(struct pcpu_chunk *chunk, int index)
{
	return chunk->alloc_map +
	       (index * PCPU_BITMAP_BLOCK_BITS / BITS_PER_LONG);
}

static unsigned long pcpu_off_to_block_index(int off)
{
	return off / PCPU_BITMAP_BLOCK_BITS;
}

static unsigned long pcpu_off_to_block_off(int off)
{
	return off & (PCPU_BITMAP_BLOCK_BITS - 1);
}

static unsigned long pcpu_block_off_to_off(int index, int off)
{
	return index * PCPU_BITMAP_BLOCK_BITS + off;
}

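/*
 * E.g., with PCPU_BITMAP_BLOCK_BITS == 1024 (one 4KB page of 4-byte
 * fragments per block), chunk offset 2500 falls in block index
 * 2500 / 1024 == 2 at block offset 2500 & 1023 == 452, and
 * pcpu_block_off_to_off(2, 452) recovers 2500.  (Illustrative values
 * for the common configuration.)
 */
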
/*
 * pcpu_next_hint - determine which hint to use
 * @block: block of interest
 * @alloc_bits: size of allocation
 *
 * This determines if we should scan based on the scan_hint or first_free.
 * In general, we want to scan from first_free to fulfill allocations by
 * first fit.  However, if we know a scan_hint at position scan_hint_start
 * cannot fulfill an allocation, we can begin scanning from there knowing
 * the contig_hint will be our fallback.
 */
static int pcpu_next_hint(struct pcpu_block_md *block, int alloc_bits)
{
	/*
	 * The three conditions below determine if we can skip past the
	 * scan_hint.  First, does the scan hint exist.  Second, is the
	 * contig_hint after the scan_hint (possibly not true iff
	 * contig_hint == scan_hint).  Third, is the allocation request
	 * larger than the scan_hint.
	 */
	if (block->scan_hint &&
	    block->contig_hint_start > block->scan_hint_start &&
	    alloc_bits > block->scan_hint)
		return block->scan_hint_start + block->scan_hint;

	return block->first_free;
}

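/*
 * E.g., a block with first_free == 0, a scan_hint of 10 bits at
 * scan_hint_start == 100, and a contig_hint further right: a request
 * for 16 bits cannot fit in the scan hint, so the scan starts at
 * 100 + 10 == 110 instead of first_free.  (Illustrative numbers.)
 */
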
/**
 * pcpu_next_md_free_region - finds the next hint free area
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of free area
 *
 * Helper function for pcpu_for_each_md_free_region.  It checks
 * block->contig_hint and performs aggregation across blocks to find the
 * next hint.  It modifies bit_off and bits in-place to be consumed in the
 * loop.
 */
static void pcpu_next_md_free_region(struct pcpu_chunk *chunk, int *bit_off,
				     int *bits)
{
	int i = pcpu_off_to_block_index(*bit_off);
	int block_off = pcpu_off_to_block_off(*bit_off);
	struct pcpu_block_md *block;

	*bits = 0;
	for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
	     block++, i++) {
		/* handles contig area across blocks */
		if (*bits) {
			*bits += block->left_free;
			if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
				continue;
			return;
		}

		/*
		 * This checks three things.  First, is there a contig_hint
		 * to check.  Second, have we checked this hint before by
		 * comparing the block_off.  Third, is this the same as the
		 * right contig hint.  In the last case, it spills over into
		 * the next block and should be handled by the contig area
		 * across blocks code.
		 */
		*bits = block->contig_hint;
		if (*bits && block->contig_hint_start >= block_off &&
		    *bits + block->contig_hint_start < PCPU_BITMAP_BLOCK_BITS) {
			*bit_off = pcpu_block_off_to_off(i,
					block->contig_hint_start);
			return;
		}
		/* reset to satisfy the second predicate above */
		block_off = 0;

		*bits = block->right_free;
		*bit_off = (i + 1) * PCPU_BITMAP_BLOCK_BITS - block->right_free;
	}
}

/**
 * pcpu_next_fit_region - finds fit areas for a given allocation request
 * @chunk: chunk of interest
 * @alloc_bits: size of allocation
 * @align: alignment of area (max PAGE_SIZE)
 * @bit_off: chunk offset
 * @bits: size of free area
 *
 * Finds the next free region that is viable for use with a given size and
 * alignment.  This only returns if there is a valid area to be used for this
 * allocation.  block->first_free is returned if the allocation request fits
 * within the block to see if the request can be fulfilled prior to the contig
 * hint.
 */
static void pcpu_next_fit_region(struct pcpu_chunk *chunk, int alloc_bits,
				 int align, int *bit_off, int *bits)
{
	int i = pcpu_off_to_block_index(*bit_off);
	int block_off = pcpu_off_to_block_off(*bit_off);
	struct pcpu_block_md *block;

	*bits = 0;
	for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
	     block++, i++) {
		/* handles contig area across blocks */
		if (*bits) {
			*bits += block->left_free;
			if (*bits >= alloc_bits)
				return;
			if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
				continue;
		}

		/* check block->contig_hint */
		*bits = ALIGN(block->contig_hint_start, align) -
			block->contig_hint_start;
		/*
		 * This uses the block offset to determine if this has been
		 * checked in the prior iteration.
		 */
		if (block->contig_hint &&
		    block->contig_hint_start >= block_off &&
		    block->contig_hint >= *bits + alloc_bits) {
			int start = pcpu_next_hint(block, alloc_bits);

			*bits += alloc_bits + block->contig_hint_start -
				 start;
			*bit_off = pcpu_block_off_to_off(i, start);
			return;
		}
		/* reset to satisfy the second predicate above */
		block_off = 0;

		*bit_off = ALIGN(PCPU_BITMAP_BLOCK_BITS - block->right_free,
				 align);
		*bits = PCPU_BITMAP_BLOCK_BITS - *bit_off;
		*bit_off = pcpu_block_off_to_off(i, *bit_off);
		if (*bits >= alloc_bits)
			return;
	}

	/* no valid offsets were found - fail condition */
	*bit_off = pcpu_chunk_map_bits(chunk);
}

/*
 * Metadata free area iterators.  These perform aggregation of free areas
 * based on the metadata blocks and return the offset @bit_off and size in
 * bits of the free area @bits.  pcpu_for_each_fit_region only returns when
 * a fit is found for the allocation request.
 */
#define pcpu_for_each_md_free_region(chunk, bit_off, bits)		\
	for (pcpu_next_md_free_region((chunk), &(bit_off), &(bits));	\
	     (bit_off) < pcpu_chunk_map_bits((chunk));			\
	     (bit_off) += (bits) + 1,					\
	     pcpu_next_md_free_region((chunk), &(bit_off), &(bits)))

#define pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits)     \
	for (pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
				  &(bits));				      \
	     (bit_off) < pcpu_chunk_map_bits((chunk));			      \
	     (bit_off) += (bits),					      \
	     pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
				  &(bits)))

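/*
 * Typical use, as in pcpu_chunk_refresh_hint() below:
 *
 *	bits = 0;
 *	pcpu_for_each_md_free_region(chunk, bit_off, bits)
 *		pcpu_block_update(chunk_md, bit_off, bit_off + bits);
 */
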
/**
 * pcpu_mem_zalloc - allocate memory
 * @size: bytes to allocate
 * @gfp: allocation flags
 *
 * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
 * kzalloc() is used; otherwise, the equivalent of vzalloc() is used.
 * This is to facilitate passing through whitelisted flags.  The
 * returned memory is always zeroed.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_mem_zalloc(size_t size, gfp_t gfp)
{
	if (WARN_ON_ONCE(!slab_is_available()))
		return NULL;

	if (size <= PAGE_SIZE)
		return kzalloc(size, gfp);
	else
		return __vmalloc(size, gfp | __GFP_ZERO);
}

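/*
 * E.g., on a 4KB-page system, pcpu_mem_zalloc(512, GFP_KERNEL) is
 * served by kzalloc() while pcpu_mem_zalloc(8192, GFP_KERNEL) goes
 * through __vmalloc(); both return zeroed memory or NULL.
 * (Illustrative sizes.)
 */
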
/**
 * pcpu_mem_free - free memory
 * @ptr: memory to free
 *
 * Free @ptr.  @ptr should have been allocated using pcpu_mem_zalloc().
 */
static void pcpu_mem_free(void *ptr)
{
	kvfree(ptr);
}

static void __pcpu_chunk_move(struct pcpu_chunk *chunk, int slot,
			      bool move_front)
{
	if (chunk != pcpu_reserved_chunk) {
		struct list_head *pcpu_slot;

		pcpu_slot = pcpu_chunk_list(pcpu_chunk_type(chunk));
		if (move_front)
			list_move(&chunk->list, &pcpu_slot[slot]);
		else
			list_move_tail(&chunk->list, &pcpu_slot[slot]);
	}
}

static void pcpu_chunk_move(struct pcpu_chunk *chunk, int slot)
{
	__pcpu_chunk_move(chunk, slot, true);
}

/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * New slot according to the changed state is determined and @chunk is
 * moved to the slot.  Note that the reserved chunk is never put on
 * chunk slots.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	if (oslot != nslot)
		__pcpu_chunk_move(chunk, nslot, oslot < nslot);
}

/*
 * pcpu_update_empty_pages - update empty page counters
 * @chunk: chunk of interest
 * @nr: nr of empty pages
 *
 * This is used to keep track of the empty pages based on the premise that
 * an md_block covers a page.  The hint update functions recognize if a block
 * is made full or broken to calculate deltas for keeping track of free pages.
 */
static inline void pcpu_update_empty_pages(struct pcpu_chunk *chunk, int nr)
{
	chunk->nr_empty_pop_pages += nr;
	if (chunk != pcpu_reserved_chunk)
		pcpu_nr_empty_pop_pages += nr;
}

/*
 * pcpu_region_overlap - determines if two regions overlap
 * @a: start of first region, inclusive
 * @b: end of first region, exclusive
 * @x: start of second region, inclusive
 * @y: end of second region, exclusive
 *
 * This is used to determine if the hint region [a, b) overlaps with the
 * allocated region [x, y).
 */
static inline bool pcpu_region_overlap(int a, int b, int x, int y)
{
	return (a < y) && (x < b);
}

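/*
 * E.g., the hint region [3, 7) and the allocated region [6, 10)
 * overlap because 3 < 10 && 6 < 7, so a hint covering [3, 7) would
 * have to be invalidated by an allocation at [6, 10).
 */
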
/**
 * pcpu_block_update - updates a block given a free area
 * @block: block of interest
 * @start: start offset in block
 * @end: end offset in block
 *
 * Updates a block given a known free area.  The region [start, end) is
 * expected to be the entirety of the free area within a block.  Chooses
 * the best starting offset if the contig hints are equal.
 */
static void pcpu_block_update(struct pcpu_block_md *block, int start, int end)
{
	int contig = end - start;

	block->first_free = min(block->first_free, start);
	if (start == 0)
		block->left_free = contig;

	if (end == block->nr_bits)
		block->right_free = contig;

	if (contig > block->contig_hint) {
		/* promote the old contig_hint to be the new scan_hint */
		if (start > block->contig_hint_start) {
			if (block->contig_hint > block->scan_hint) {
				block->scan_hint_start =
					block->contig_hint_start;
				block->scan_hint = block->contig_hint;
			} else if (start < block->scan_hint_start) {
				/*
				 * The old contig_hint == scan_hint.  But, the
				 * new contig is larger so hold the invariant
				 * scan_hint_start < contig_hint_start.
				 */
				block->scan_hint = 0;
			}
		} else {
			block->scan_hint = 0;
		}
		block->contig_hint_start = start;
		block->contig_hint = contig;
	} else if (contig == block->contig_hint) {
		if (block->contig_hint_start &&
		    (!start ||
		     __ffs(start) > __ffs(block->contig_hint_start))) {
			/* start has a better alignment so use it */
			block->contig_hint_start = start;
			if (start < block->scan_hint_start &&
			    block->contig_hint > block->scan_hint)
				block->scan_hint = 0;
		} else if (start > block->scan_hint_start ||
			   block->contig_hint > block->scan_hint) {
			/*
			 * Knowing contig == contig_hint, update the scan_hint
			 * if it is farther than or larger than the current
			 * scan_hint.
			 */
			block->scan_hint_start = start;
			block->scan_hint = contig;
		}
	} else {
		/*
		 * The region is smaller than the contig_hint.  So only update
		 * the scan_hint if it is larger than or equal and farther than
		 * the current scan_hint.
		 */
		if ((start < block->contig_hint_start &&
		     (contig > block->scan_hint ||
		      (contig == block->scan_hint &&
		       start > block->scan_hint_start)))) {
			block->scan_hint_start = start;
			block->scan_hint = contig;
		}
	}
}

/*
 * pcpu_block_update_scan - update a block given a free area from a scan
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of free area
 *
 * Finding the final allocation spot first goes through pcpu_find_block_fit()
 * to find a block that can hold the allocation and then pcpu_alloc_area()
 * where a scan is used.  When allocations require specific alignments,
 * we can inadvertently create holes which will not be seen in the alloc
 * or free paths.
 *
 * This takes a given free area hole and updates a block as it may change the
 * scan_hint.  We need to scan backwards to ensure we don't miss free bits
 * from alignment.
 */
static void pcpu_block_update_scan(struct pcpu_chunk *chunk, int bit_off,
				   int bits)
{
	int s_off = pcpu_off_to_block_off(bit_off);
	int e_off = s_off + bits;
	int s_index, l_bit;
	struct pcpu_block_md *block;

	if (e_off > PCPU_BITMAP_BLOCK_BITS)
		return;

	s_index = pcpu_off_to_block_index(bit_off);
	block = chunk->md_blocks + s_index;

	/* scan backwards in case of alignment skipping free bits */
	l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index), s_off);
	s_off = (s_off == l_bit) ? 0 : l_bit + 1;

	pcpu_block_update(block, s_off, e_off);
}

/**
 * pcpu_chunk_refresh_hint - updates metadata about a chunk
 * @chunk: chunk of interest
 * @full_scan: if we should scan from the beginning
 *
 * Iterates over the metadata blocks to find the largest contig area.
 * A full scan can be avoided on the allocation path as this is triggered
 * if we broke the contig_hint.  In doing so, the scan_hint will be before
 * the contig_hint or after if the scan_hint == contig_hint.  This cannot
 * be prevented on freeing as we want to find the largest area possibly
 * spanning blocks.
 */
static void pcpu_chunk_refresh_hint(struct pcpu_chunk *chunk, bool full_scan)
{
	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
	int bit_off, bits;

	/* promote scan_hint to contig_hint */
	if (!full_scan && chunk_md->scan_hint) {
		bit_off = chunk_md->scan_hint_start + chunk_md->scan_hint;
		chunk_md->contig_hint_start = chunk_md->scan_hint_start;
		chunk_md->contig_hint = chunk_md->scan_hint;
		chunk_md->scan_hint = 0;
	} else {
		bit_off = chunk_md->first_free;
		chunk_md->contig_hint = 0;
	}

	bits = 0;
	pcpu_for_each_md_free_region(chunk, bit_off, bits)
		pcpu_block_update(chunk_md, bit_off, bit_off + bits);
}

/**
 * pcpu_block_refresh_hint
 * @chunk: chunk of interest
 * @index: index of the metadata block
 *
 * Scans over the block beginning at first_free and updates the block
 * metadata accordingly.
 */
static void pcpu_block_refresh_hint(struct pcpu_chunk *chunk, int index)
{
	struct pcpu_block_md *block = chunk->md_blocks + index;
	unsigned long *alloc_map = pcpu_index_alloc_map(chunk, index);
	unsigned int rs, re, start;	/* region start, region end */

	/* promote scan_hint to contig_hint */
	if (block->scan_hint) {
		start = block->scan_hint_start + block->scan_hint;
		block->contig_hint_start = block->scan_hint_start;
		block->contig_hint = block->scan_hint;
		block->scan_hint = 0;
	} else {
		start = block->first_free;
		block->contig_hint = 0;
	}

	block->right_free = 0;

	/* iterate over free areas and update the contig hints */
	bitmap_for_each_clear_region(alloc_map, rs, re, start,
				     PCPU_BITMAP_BLOCK_BITS)
		pcpu_block_update(block, rs, re);
}

/**
 * pcpu_block_update_hint_alloc - update hint on allocation path
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of request
 *
 * Updates metadata for the allocation path.  The metadata only has to be
 * refreshed by a full scan iff the chunk's contig hint is broken.  Block level
 * scans are required if the block's contig hint is broken.
 */
static void pcpu_block_update_hint_alloc(struct pcpu_chunk *chunk, int bit_off,
					 int bits)
{
	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
	int nr_empty_pages = 0;
	struct pcpu_block_md *s_block, *e_block, *block;
	int s_index, e_index;	/* block indexes of the allocation */
	int s_off, e_off;	/* block offsets of the allocation */

	/*
	 * Calculate per block offsets.
	 * The calculation uses an inclusive range, but the resulting offsets
	 * are [start, end).  e_index always points to the last block in the
	 * range.
	 */
	s_index = pcpu_off_to_block_index(bit_off);
	e_index = pcpu_off_to_block_index(bit_off + bits - 1);
	s_off = pcpu_off_to_block_off(bit_off);
	e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;

	s_block = chunk->md_blocks + s_index;
	e_block = chunk->md_blocks + e_index;

	/*
	 * Update s_block.
	 * block->first_free must be updated if the allocation takes its place.
	 * If the allocation breaks the contig_hint, a scan is required to
	 * restore this hint.
	 */
	if (s_block->contig_hint == PCPU_BITMAP_BLOCK_BITS)
		nr_empty_pages++;

	if (s_off == s_block->first_free)
		s_block->first_free = find_next_zero_bit(
					pcpu_index_alloc_map(chunk, s_index),
					PCPU_BITMAP_BLOCK_BITS,
					s_off + bits);

	if (pcpu_region_overlap(s_block->scan_hint_start,
				s_block->scan_hint_start + s_block->scan_hint,
				s_off,
				s_off + bits))
		s_block->scan_hint = 0;

	if (pcpu_region_overlap(s_block->contig_hint_start,
				s_block->contig_hint_start +
				s_block->contig_hint,
				s_off,
				s_off + bits)) {
		/* block contig hint is broken - scan to fix it */
		if (!s_off)
			s_block->left_free = 0;
		pcpu_block_refresh_hint(chunk, s_index);
	} else {
		/* update left and right contig manually */
		s_block->left_free = min(s_block->left_free, s_off);
		if (s_index == e_index)
			s_block->right_free = min_t(int, s_block->right_free,
					PCPU_BITMAP_BLOCK_BITS - e_off);
		else
			s_block->right_free = 0;
	}

	/*
	 * Update e_block.
	 */
	if (s_index != e_index) {
		if (e_block->contig_hint == PCPU_BITMAP_BLOCK_BITS)
			nr_empty_pages++;

		/*
		 * When the allocation is across blocks, the end is along
		 * the left part of the e_block.
		 */
		e_block->first_free = find_next_zero_bit(
				pcpu_index_alloc_map(chunk, e_index),
				PCPU_BITMAP_BLOCK_BITS, e_off);

		if (e_off == PCPU_BITMAP_BLOCK_BITS) {
			/* reset the block */
			e_block++;
		} else {
			if (e_off > e_block->scan_hint_start)
				e_block->scan_hint = 0;

			e_block->left_free = 0;
			if (e_off > e_block->contig_hint_start) {
				/* contig hint is broken - scan to fix it */
				pcpu_block_refresh_hint(chunk, e_index);
			} else {
				e_block->right_free =
					min_t(int, e_block->right_free,
					      PCPU_BITMAP_BLOCK_BITS - e_off);
			}
		}

		/* update in-between md_blocks */
		nr_empty_pages += (e_index - s_index - 1);
		for (block = s_block + 1; block < e_block; block++) {
			block->scan_hint = 0;
			block->contig_hint = 0;
			block->left_free = 0;
			block->right_free = 0;
		}
	}

	if (nr_empty_pages)
		pcpu_update_empty_pages(chunk, -nr_empty_pages);

	if (pcpu_region_overlap(chunk_md->scan_hint_start,
				chunk_md->scan_hint_start +
				chunk_md->scan_hint,
				bit_off,
				bit_off + bits))
		chunk_md->scan_hint = 0;

	/*
	 * The only time a full chunk scan is required is if the chunk
	 * contig hint is broken.  Otherwise, it means a smaller space
	 * was used and therefore the chunk contig hint is still correct.
	 */
	if (pcpu_region_overlap(chunk_md->contig_hint_start,
				chunk_md->contig_hint_start +
				chunk_md->contig_hint,
				bit_off,
				bit_off + bits))
		pcpu_chunk_refresh_hint(chunk, false);
}

/**
 * pcpu_block_update_hint_free - updates the block hints on the free path
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of request
 *
 * Updates metadata for the free path.  This avoids a blind block
 * refresh by making use of the block contig hints.  If this fails, it scans
 * forward and backward to determine the extent of the free area.  This is
 * capped at the boundary of blocks.
 *
 * A chunk update is triggered if a page becomes free, a block becomes free,
 * or the free spans across blocks.  This tradeoff is to minimize iterating
 * over the block metadata to update chunk_md->contig_hint.
 * chunk_md->contig_hint may be off by up to a page, but it will never be more
 * than the available space.  If the contig hint is contained in one block, it
 * will be accurate.
 */
static void pcpu_block_update_hint_free(struct pcpu_chunk *chunk, int bit_off,
					int bits)
{
	int nr_empty_pages = 0;
	struct pcpu_block_md *s_block, *e_block, *block;
	int s_index, e_index;	/* block indexes of the freed allocation */
	int s_off, e_off;	/* block offsets of the freed allocation */
	int start, end;		/* start and end of the whole free area */

	/*
	 * Calculate per block offsets.
	 * The calculation uses an inclusive range, but the resulting offsets
	 * are [start, end).  e_index always points to the last block in the
	 * range.
	 */
	s_index = pcpu_off_to_block_index(bit_off);
	e_index = pcpu_off_to_block_index(bit_off + bits - 1);
	s_off = pcpu_off_to_block_off(bit_off);
	e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;

	s_block = chunk->md_blocks + s_index;
	e_block = chunk->md_blocks + e_index;

	/*
	 * Check if the freed area aligns with the block->contig_hint.
	 * If it does, then the scan to find the beginning/end of the
	 * larger free area can be avoided.
	 *
	 * start and end refer to beginning and end of the free area
	 * within their respective blocks.  This is not necessarily
	 * the entire free area as it may span blocks past the beginning
	 * or end of the block.
	 */
	start = s_off;
	if (s_off == s_block->contig_hint + s_block->contig_hint_start) {
		start = s_block->contig_hint_start;
	} else {
		/*
		 * Scan backwards to find the extent of the free area.
		 * find_last_bit returns the starting bit, so if the start bit
		 * is returned, that means there was no last bit and the
		 * remainder of the chunk is free.
		 */
		int l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index),
					  start);
		start = (start == l_bit) ? 0 : l_bit + 1;
	}

	end = e_off;
	if (e_off == e_block->contig_hint_start)
		end = e_block->contig_hint_start + e_block->contig_hint;
	else
		end = find_next_bit(pcpu_index_alloc_map(chunk, e_index),
				    PCPU_BITMAP_BLOCK_BITS, end);

	/* update s_block */
	e_off = (s_index == e_index) ? end : PCPU_BITMAP_BLOCK_BITS;
	if (!start && e_off == PCPU_BITMAP_BLOCK_BITS)
		nr_empty_pages++;
	pcpu_block_update(s_block, start, e_off);

	/* freeing across blocks */
	if (s_index != e_index) {
		/* update e_block */
		if (end == PCPU_BITMAP_BLOCK_BITS)
			nr_empty_pages++;
		pcpu_block_update(e_block, 0, end);

		/* reset md_blocks in the middle */
		nr_empty_pages += (e_index - s_index - 1);
		for (block = s_block + 1; block < e_block; block++) {
			block->first_free = 0;
			block->scan_hint = 0;
			block->contig_hint_start = 0;
			block->contig_hint = PCPU_BITMAP_BLOCK_BITS;
			block->left_free = PCPU_BITMAP_BLOCK_BITS;
			block->right_free = PCPU_BITMAP_BLOCK_BITS;
		}
	}

	if (nr_empty_pages)
		pcpu_update_empty_pages(chunk, nr_empty_pages);

	/*
	 * Refresh chunk metadata when the free makes a block free or spans
	 * across blocks.  The contig_hint may be off by up to a page, but if
	 * the contig_hint is contained in a block, it will be accurate with
	 * the else condition below.
	 */
	if (((end - start) >= PCPU_BITMAP_BLOCK_BITS) || s_index != e_index)
		pcpu_chunk_refresh_hint(chunk, true);
	else
		pcpu_block_update(&chunk->chunk_md,
				  pcpu_block_off_to_off(s_index, start),
				  end);
}

/**
 * pcpu_is_populated - determines if the region is populated
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of area
 * @next_off: return value for the next offset to start searching
 *
 * For atomic allocations, check if the backing pages are populated.
 *
 * RETURNS:
 * True if the backing pages are populated.
 * @next_off is used to skip over unpopulated pages in pcpu_find_block_fit.
 */
static bool pcpu_is_populated(struct pcpu_chunk *chunk, int bit_off, int bits,
			      int *next_off)
{
	unsigned int page_start, page_end, rs, re;

	page_start = PFN_DOWN(bit_off * PCPU_MIN_ALLOC_SIZE);
	page_end = PFN_UP((bit_off + bits) * PCPU_MIN_ALLOC_SIZE);

	rs = page_start;
	bitmap_next_clear_region(chunk->populated, &rs, &re, page_end);
	if (rs >= page_end)
		return true;

	*next_off = re * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE;
	return false;
}

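/*
 * E.g., with 4KB pages and PCPU_MIN_ALLOC_SIZE == 4, bit_off == 1024
 * and bits == 512 describe bytes [4096, 6144), i.e. pages [1, 2); the
 * region is only usable for an atomic allocation if page 1 is
 * populated.  (Illustrative values.)
 */
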
/**
 * pcpu_find_block_fit - finds the block index to start searching
 * @chunk: chunk of interest
 * @alloc_bits: size of request in allocation units
 * @align: alignment of area (max PAGE_SIZE bytes)
 * @pop_only: use populated regions only
 *
 * Given a chunk and an allocation spec, find the offset to begin searching
 * for a free region.  This iterates over the bitmap metadata blocks to
 * find an offset that will be guaranteed to fit the requirements.  It is
 * not quite first fit: if the allocation does not fit in the contig hint
 * of a block or chunk, it is skipped.  This errs on the side of caution
 * to prevent excess iteration.  Poor alignment can cause the allocator to
 * skip over blocks and chunks that have valid free areas.
 *
 * RETURNS:
 * The offset in the bitmap to begin searching.
 * -1 if no offset is found.
 */
static int pcpu_find_block_fit(struct pcpu_chunk *chunk, int alloc_bits,
			       size_t align, bool pop_only)
{
	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
	int bit_off, bits, next_off;

	/*
	 * Check to see if the allocation can fit in the chunk's contig hint.
	 * This is an optimization to prevent scanning by assuming if it
	 * cannot fit in the global hint, there is memory pressure and creating
	 * a new chunk would happen soon.
	 */
	bit_off = ALIGN(chunk_md->contig_hint_start, align) -
		  chunk_md->contig_hint_start;
	if (bit_off + alloc_bits > chunk_md->contig_hint)
		return -1;

	bit_off = pcpu_next_hint(chunk_md, alloc_bits);
	bits = 0;
	pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) {
		if (!pop_only || pcpu_is_populated(chunk, bit_off, bits,
						   &next_off))
			break;

		bit_off = next_off;
		bits = 0;
	}

	if (bit_off == pcpu_chunk_map_bits(chunk))
		return -1;

	return bit_off;
}

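/*
 * E.g., if the chunk's contig_hint is 64 bits at bit 100 and a request
 * wants 48 bits with a 16-bit alignment, aligning costs
 * ALIGN(100, 16) - 100 == 12 bits and 12 + 48 <= 64, so the scan
 * proceeds; a 60-bit request would return -1 immediately.
 * (Illustrative numbers; alignment here is in allocation units.)
 */
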
/*
 * pcpu_find_zero_area - modified from bitmap_find_next_zero_area_off()
 * @map: the address to base the search on
 * @size: the bitmap size in bits
 * @start: the bitnumber to start searching at
 * @nr: the number of zeroed bits we're looking for
 * @align_mask: alignment mask for zero area
 * @largest_off: offset of the largest area skipped
 * @largest_bits: size of the largest area skipped
 *
 * The @align_mask should be one less than a power of 2.
 *
 * This is a modified version of bitmap_find_next_zero_area_off() to remember
 * the largest area that was skipped.  This is imperfect, but in general is
 * good enough.  The largest remembered region is the largest failed region
 * seen.  This does not include anything we possibly skipped due to alignment.
 * pcpu_block_update_scan() does scan backwards to try and recover what was
 * lost to alignment.  While this can cause scanning to miss earlier possible
 * free areas, smaller allocations will eventually fill those holes.
 */
static unsigned long pcpu_find_zero_area(unsigned long *map,
					 unsigned long size,
					 unsigned long start,
					 unsigned long nr,
					 unsigned long align_mask,
					 unsigned long *largest_off,
					 unsigned long *largest_bits)
{
	unsigned long index, end, i, area_off, area_bits;
again:
	index = find_next_zero_bit(map, size, start);

	/* Align allocation */
	index = __ALIGN_MASK(index, align_mask);
	area_off = index;

	end = index + nr;
	if (end > size)
		return end;
	i = find_next_bit(map, end, index);
	if (i < end) {
		area_bits = i - area_off;
		/* remember largest unused area with best alignment */
		if (area_bits > *largest_bits ||
		    (area_bits == *largest_bits && *largest_off &&
		     (!area_off || __ffs(area_off) > __ffs(*largest_off)))) {
			*largest_off = area_off;
			*largest_bits = area_bits;
		}

		start = i + 1;
		goto again;
	}
	return index;
}

/**
 * pcpu_alloc_area - allocates an area from a pcpu_chunk
 * @chunk: chunk of interest
 * @alloc_bits: size of request in allocation units
 * @align: alignment of area (max PAGE_SIZE)
 * @start: bit_off to start searching
 *
 * This function takes in a @start offset to begin searching to fit an
 * allocation of @alloc_bits with alignment @align.  It needs to scan
 * the allocation map because if it fits within the block's contig hint,
 * @start will be block->first_free.  This is an attempt to fill the
 * allocation prior to breaking the contig hint.  The allocation and
 * boundary maps are updated accordingly if it confirms a valid
 * free area.
 *
 * RETURNS:
 * Allocated addr offset in @chunk on success.
 * -1 if no matching area is found.
 */
static int pcpu_alloc_area(struct pcpu_chunk *chunk, int alloc_bits,
			   size_t align, int start)
{
	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
	size_t align_mask = (align) ? (align - 1) : 0;
	unsigned long area_off = 0, area_bits = 0;
	int bit_off, end, oslot;

	lockdep_assert_held(&pcpu_lock);

	oslot = pcpu_chunk_slot(chunk);

	/*
	 * Search to find a fit.
	 */
	end = min_t(int, start + alloc_bits + PCPU_BITMAP_BLOCK_BITS,
		    pcpu_chunk_map_bits(chunk));
	bit_off = pcpu_find_zero_area(chunk->alloc_map, end, start, alloc_bits,
				      align_mask, &area_off, &area_bits);
	if (bit_off >= end)
		return -1;

	if (area_bits)
		pcpu_block_update_scan(chunk, area_off, area_bits);

	/* update alloc map */
	bitmap_set(chunk->alloc_map, bit_off, alloc_bits);

	/* update boundary map */
	set_bit(bit_off, chunk->bound_map);
	bitmap_clear(chunk->bound_map, bit_off + 1, alloc_bits - 1);
	set_bit(bit_off + alloc_bits, chunk->bound_map);

	chunk->free_bytes -= alloc_bits * PCPU_MIN_ALLOC_SIZE;

	/* update first free bit */
	if (bit_off == chunk_md->first_free)
		chunk_md->first_free = find_next_zero_bit(
					chunk->alloc_map,
					pcpu_chunk_map_bits(chunk),
					bit_off + alloc_bits);

	pcpu_block_update_hint_alloc(chunk, bit_off, alloc_bits);

	pcpu_chunk_relocate(chunk, oslot);

	return bit_off * PCPU_MIN_ALLOC_SIZE;
}

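/*
 * E.g., a 32-bit (128 byte) allocation placed at bit_off 128 sets
 * alloc_map bits [128, 160), sets bound_map bits 128 and 160, and
 * clears the bound_map bits in between; pcpu_free_area() below
 * recovers the allocation size from exactly this boundary encoding.
 * (Illustrative offsets.)
 */
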
/**
 * pcpu_free_area - frees the corresponding offset
 * @chunk: chunk of interest
 * @off: addr offset into chunk
 *
 * This function determines the size of an allocation to free using
 * the boundary bitmap and clears the allocation map.
 *
 * RETURNS:
 * Number of freed bytes.
 */
static int pcpu_free_area(struct pcpu_chunk *chunk, int off)
{
	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
	int bit_off, bits, end, oslot, freed;

	lockdep_assert_held(&pcpu_lock);
	pcpu_stats_area_dealloc(chunk);

	oslot = pcpu_chunk_slot(chunk);

	bit_off = off / PCPU_MIN_ALLOC_SIZE;

	/* find end index */
	end = find_next_bit(chunk->bound_map, pcpu_chunk_map_bits(chunk),
			    bit_off + 1);
	bits = end - bit_off;
	bitmap_clear(chunk->alloc_map, bit_off, bits);

	freed = bits * PCPU_MIN_ALLOC_SIZE;

	/* update metadata */
	chunk->free_bytes += freed;

	/* update first free bit */
	chunk_md->first_free = min(chunk_md->first_free, bit_off);

	pcpu_block_update_hint_free(chunk, bit_off, bits);

	pcpu_chunk_relocate(chunk, oslot);

	return freed;
}

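/*
 * E.g., continuing the example above, pcpu_free_area(chunk, 512) maps
 * byte offset 512 to bit_off 128, finds the next bound_map bit at 160,
 * clears alloc_map bits [128, 160), and returns 32 * 4 == 128 freed
 * bytes.  (Illustrative offsets.)
 */
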
static void pcpu_init_md_block(struct pcpu_block_md *block, int nr_bits)
{
	block->scan_hint = 0;
	block->contig_hint = nr_bits;
	block->left_free = nr_bits;
	block->right_free = nr_bits;
	block->first_free = 0;
	block->nr_bits = nr_bits;
}

static void pcpu_init_md_blocks(struct pcpu_chunk *chunk)
{
	struct pcpu_block_md *md_block;

	/* init the chunk's block */
	pcpu_init_md_block(&chunk->chunk_md, pcpu_chunk_map_bits(chunk));

	for (md_block = chunk->md_blocks;
	     md_block != chunk->md_blocks + pcpu_chunk_nr_blocks(chunk);
	     md_block++)
		pcpu_init_md_block(md_block, PCPU_BITMAP_BLOCK_BITS);
}

/**
 * pcpu_alloc_first_chunk - creates chunks that serve the first chunk
 * @tmp_addr: the start of the region served
 * @map_size: size of the region served
 *
 * This is responsible for creating the chunks that serve the first chunk.
 * The base_addr is @tmp_addr aligned down to a page boundary while the
 * region end is aligned up.  Offsets are kept track of to determine the
 * region served.  All this is done to appease the bitmap allocator in
 * avoiding partial blocks.
 *
 * RETURNS:
 * Chunk serving the region at @tmp_addr of @map_size.
 */
static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr,
							 int map_size)
{
	struct pcpu_chunk *chunk;
	unsigned long aligned_addr, lcm_align;
	int start_offset, offset_bits, region_size, region_bits;
	size_t alloc_size;

	/* region calculations */
	aligned_addr = tmp_addr & PAGE_MASK;

	start_offset = tmp_addr - aligned_addr;

	/*
	 * Align the end of the region with the LCM of PAGE_SIZE and
	 * PCPU_BITMAP_BLOCK_SIZE.  One of these constants is a multiple of
	 * the other.
	 */
	lcm_align = lcm(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE);
	region_size = ALIGN(start_offset + map_size, lcm_align);

	/* allocate chunk */
	alloc_size = struct_size(chunk, populated,
				 BITS_TO_LONGS(region_size >> PAGE_SHIFT));
	chunk = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
	if (!chunk)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      alloc_size);

	INIT_LIST_HEAD(&chunk->list);

	chunk->base_addr = (void *)aligned_addr;
	chunk->start_offset = start_offset;
	chunk->end_offset = region_size - chunk->start_offset - map_size;

	chunk->nr_pages = region_size >> PAGE_SHIFT;
	region_bits = pcpu_chunk_map_bits(chunk);

	alloc_size = BITS_TO_LONGS(region_bits) * sizeof(chunk->alloc_map[0]);
	chunk->alloc_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
	if (!chunk->alloc_map)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      alloc_size);

	alloc_size =
		BITS_TO_LONGS(region_bits + 1) * sizeof(chunk->bound_map[0]);
	chunk->bound_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
	if (!chunk->bound_map)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      alloc_size);

	alloc_size = pcpu_chunk_nr_blocks(chunk) * sizeof(chunk->md_blocks[0]);
	chunk->md_blocks = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
	if (!chunk->md_blocks)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      alloc_size);

#ifdef CONFIG_MEMCG_KMEM
	/* first chunk isn't memcg-aware */
	chunk->obj_cgroups = NULL;
#endif
	pcpu_init_md_blocks(chunk);

	/* manage populated page bitmap */
	chunk->immutable = true;
	bitmap_fill(chunk->populated, chunk->nr_pages);
	chunk->nr_populated = chunk->nr_pages;
	chunk->nr_empty_pop_pages = chunk->nr_pages;

	chunk->free_bytes = map_size;

	if (chunk->start_offset) {
		/* hide the beginning of the bitmap */
		offset_bits = chunk->start_offset / PCPU_MIN_ALLOC_SIZE;
		bitmap_set(chunk->alloc_map, 0, offset_bits);
		set_bit(0, chunk->bound_map);
		set_bit(offset_bits, chunk->bound_map);

		chunk->chunk_md.first_free = offset_bits;

		pcpu_block_update_hint_alloc(chunk, 0, offset_bits);
ca460b3c | 1376 | pcpu_block_update_hint_alloc(chunk, 0, offset_bits); |
c0ebfdc3 DZF |
1377 | } |
1378 | ||
6b9d7c8e DZF |
1379 | if (chunk->end_offset) { |
1380 | /* hide the end of the bitmap */ | |
40064aec DZF |
1381 | offset_bits = chunk->end_offset / PCPU_MIN_ALLOC_SIZE; |
1382 | bitmap_set(chunk->alloc_map, | |
1383 | pcpu_chunk_map_bits(chunk) - offset_bits, | |
1384 | offset_bits); | |
1385 | set_bit((start_offset + map_size) / PCPU_MIN_ALLOC_SIZE, | |
1386 | chunk->bound_map); | |
1387 | set_bit(region_bits, chunk->bound_map); | |
6b9d7c8e | 1388 | |
ca460b3c DZF |
1389 | pcpu_block_update_hint_alloc(chunk, pcpu_chunk_map_bits(chunk) |
1390 | - offset_bits, offset_bits); | |
1391 | } | |
40064aec | 1392 | |
10edf5b0 DZF |
1393 | return chunk; |
1394 | } | |
1395 | ||
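/*
 * Worked example (assumed values) for the region math above, with
 * PAGE_SIZE == PCPU_BITMAP_BLOCK_SIZE == 4096 so lcm_align == 4096.
 * For a tmp_addr ending in 0x9100 and map_size == 0x2e00:
 *
 *	aligned_addr = ...0x9000		(page aligned down)
 *	start_offset = 0x100
 *	region_size  = ALIGN(0x100 + 0x2e00, 4096) = 0x3000
 *	end_offset   = 0x3000 - 0x100 - 0x2e00 = 0x100
 *
 * Both offsets are then hidden by pre-setting alloc_map/bound_map bits,
 * so the bitmap allocator only ever works with whole blocks.
 */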
3c7be18a | 1396 | static struct pcpu_chunk *pcpu_alloc_chunk(enum pcpu_chunk_type type, gfp_t gfp) |
6081089f TH |
1397 | { |
1398 | struct pcpu_chunk *chunk; | |
40064aec | 1399 | int region_bits; |
6081089f | 1400 | |
47504ee0 | 1401 | chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size, gfp); |
6081089f TH |
1402 | if (!chunk) |
1403 | return NULL; | |
1404 | ||
40064aec DZF |
1405 | INIT_LIST_HEAD(&chunk->list); |
1406 | chunk->nr_pages = pcpu_unit_pages; | |
1407 | region_bits = pcpu_chunk_map_bits(chunk); | |
6081089f | 1408 | |
40064aec | 1409 | chunk->alloc_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits) * |
47504ee0 | 1410 | sizeof(chunk->alloc_map[0]), gfp); |
40064aec DZF |
1411 | if (!chunk->alloc_map) |
1412 | goto alloc_map_fail; | |
6081089f | 1413 | |
40064aec | 1414 | chunk->bound_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits + 1) * |
47504ee0 | 1415 | sizeof(chunk->bound_map[0]), gfp); |
40064aec DZF |
1416 | if (!chunk->bound_map) |
1417 | goto bound_map_fail; | |
6081089f | 1418 | |
ca460b3c | 1419 | chunk->md_blocks = pcpu_mem_zalloc(pcpu_chunk_nr_blocks(chunk) * |
47504ee0 | 1420 | sizeof(chunk->md_blocks[0]), gfp); |
ca460b3c DZF |
1421 | if (!chunk->md_blocks) |
1422 | goto md_blocks_fail; | |
1423 | ||
3c7be18a RG |
1424 | #ifdef CONFIG_MEMCG_KMEM |
1425 | if (pcpu_is_memcg_chunk(type)) { | |
1426 | chunk->obj_cgroups = | |
1427 | pcpu_mem_zalloc(pcpu_chunk_map_bits(chunk) * | |
1428 | sizeof(struct obj_cgroup *), gfp); | |
1429 | if (!chunk->obj_cgroups) | |
1430 | goto objcg_fail; | |
1431 | } | |
1432 | #endif | |
1433 | ||
ca460b3c DZF |
1434 | pcpu_init_md_blocks(chunk); |
1435 | ||
40064aec | 1436 | /* init metadata */ |
40064aec | 1437 | chunk->free_bytes = chunk->nr_pages * PAGE_SIZE; |
c0ebfdc3 | 1438 | |
6081089f | 1439 | return chunk; |
40064aec | 1440 | |
3c7be18a RG |
1441 | #ifdef CONFIG_MEMCG_KMEM |
1442 | objcg_fail: | |
1443 | pcpu_mem_free(chunk->md_blocks); | |
1444 | #endif | |
ca460b3c DZF |
1445 | md_blocks_fail: |
1446 | pcpu_mem_free(chunk->bound_map); | |
40064aec DZF |
1447 | bound_map_fail: |
1448 | pcpu_mem_free(chunk->alloc_map); | |
1449 | alloc_map_fail: | |
1450 | pcpu_mem_free(chunk); | |
1451 | ||
1452 | return NULL; | |
6081089f TH |
1453 | } |
1454 | ||
1455 | static void pcpu_free_chunk(struct pcpu_chunk *chunk) | |
1456 | { | |
1457 | if (!chunk) | |
1458 | return; | |
3c7be18a RG |
1459 | #ifdef CONFIG_MEMCG_KMEM |
1460 | pcpu_mem_free(chunk->obj_cgroups); | |
1461 | #endif | |
6685b357 | 1462 | pcpu_mem_free(chunk->md_blocks); |
40064aec DZF |
1463 | pcpu_mem_free(chunk->bound_map); |
1464 | pcpu_mem_free(chunk->alloc_map); | |
1d5cfdb0 | 1465 | pcpu_mem_free(chunk); |
6081089f TH |
1466 | } |
1467 | ||
b539b87f TH |
1468 | /** |
1469 | * pcpu_chunk_populated - post-population bookkeeping | |
1470 | * @chunk: pcpu_chunk which got populated | |
1471 | * @page_start: the start page | |
1472 | * @page_end: the end page | |
1473 | * | |
1474 | * Pages in [@page_start,@page_end) have been populated to @chunk. Update | |
1475 | * the bookkeeping information accordingly. Must be called after each | |
1476 | * successful population. | |
40064aec DZF |
1477 | * |
1478 | * This also keeps the running count of empty populated pages up to date |
1479 | * via pcpu_update_empty_pages(). |
b539b87f | 1480 | */ |
40064aec | 1481 | static void pcpu_chunk_populated(struct pcpu_chunk *chunk, int page_start, |
b239f7da | 1482 | int page_end) |
b539b87f TH |
1483 | { |
1484 | int nr = page_end - page_start; | |
1485 | ||
1486 | lockdep_assert_held(&pcpu_lock); | |
1487 | ||
1488 | bitmap_set(chunk->populated, page_start, nr); | |
1489 | chunk->nr_populated += nr; | |
7e8a6304 | 1490 | pcpu_nr_populated += nr; |
40064aec | 1491 | |
b239f7da | 1492 | pcpu_update_empty_pages(chunk, nr); |
b539b87f TH |
1493 | } |
1494 | ||
1495 | /** | |
1496 | * pcpu_chunk_depopulated - post-depopulation bookkeeping | |
1497 | * @chunk: pcpu_chunk which got depopulated | |
1498 | * @page_start: the start page | |
1499 | * @page_end: the end page | |
1500 | * | |
1501 | * Pages in [@page_start,@page_end) have been depopulated from @chunk. | |
1502 | * Update the bookkeeping information accordingly. Must be called after | |
1503 | * each successful depopulation. | |
1504 | */ | |
1505 | static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk, | |
1506 | int page_start, int page_end) | |
1507 | { | |
1508 | int nr = page_end - page_start; | |
1509 | ||
1510 | lockdep_assert_held(&pcpu_lock); | |
1511 | ||
1512 | bitmap_clear(chunk->populated, page_start, nr); | |
1513 | chunk->nr_populated -= nr; | |
7e8a6304 | 1514 | pcpu_nr_populated -= nr; |
b239f7da DZ |
1515 | |
1516 | pcpu_update_empty_pages(chunk, -nr); | |
b539b87f TH |
1517 | } |
1518 | ||
9f645532 TH |
1519 | /* |
1520 | * Chunk management implementation. | |
1521 | * | |
1522 | * To allow different implementations, chunk alloc/free and | |
1523 | * [de]population are implemented in a separate file which is pulled | |
1524 | * into this file and compiled together. The following functions | |
1525 | * should be implemented. | |
1526 | * | |
1527 | * pcpu_populate_chunk - populate the specified range of a chunk | |
1528 | * pcpu_depopulate_chunk - depopulate the specified range of a chunk | |
1529 | * pcpu_create_chunk - create a new chunk | |
1530 | * pcpu_destroy_chunk - destroy a chunk, always preceded by full depop | |
1531 | * pcpu_addr_to_page - translate address to the backing struct page |
1532 | * pcpu_verify_alloc_info - check alloc_info is acceptable during init | |
fbf59bc9 | 1533 | */ |
15d9f3d1 | 1534 | static int pcpu_populate_chunk(struct pcpu_chunk *chunk, |
47504ee0 | 1535 | int page_start, int page_end, gfp_t gfp); |
15d9f3d1 DZ |
1536 | static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, |
1537 | int page_start, int page_end); | |
3c7be18a RG |
1538 | static struct pcpu_chunk *pcpu_create_chunk(enum pcpu_chunk_type type, |
1539 | gfp_t gfp); | |
9f645532 TH |
1540 | static void pcpu_destroy_chunk(struct pcpu_chunk *chunk); |
1541 | static struct page *pcpu_addr_to_page(void *addr); | |
1542 | static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai); | |
fbf59bc9 | 1543 | |
b0c9778b TH |
1544 | #ifdef CONFIG_NEED_PER_CPU_KM |
1545 | #include "percpu-km.c" | |
1546 | #else | |
9f645532 | 1547 | #include "percpu-vm.c" |
b0c9778b | 1548 | #endif |
fbf59bc9 | 1549 | |
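/*
 * Backend note: percpu-km.c backs each chunk with one contiguous page
 * allocation and is used where vmalloc space is unsuitable (the
 * CONFIG_NEED_PER_CPU_KM case), while percpu-vm.c populates chunks
 * page by page into vmalloc space and is the common case.
 */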
88999a89 TH |
1550 | /** |
1551 | * pcpu_chunk_addr_search - determine chunk containing specified address | |
1552 | * @addr: address for which the chunk needs to be determined. | |
1553 | * | |
c0ebfdc3 DZF |
1554 | * This is an internal function that handles all but static allocations. |
1555 | * Static percpu address values should never be passed into the allocator. | |
1556 | * | |
88999a89 TH |
1557 | * RETURNS: |
1558 | * The address of the found chunk. | |
1559 | */ | |
1560 | static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr) | |
1561 | { | |
c0ebfdc3 | 1562 | /* is it in the dynamic region (first chunk)? */ |
560f2c23 | 1563 | if (pcpu_addr_in_chunk(pcpu_first_chunk, addr)) |
88999a89 | 1564 | return pcpu_first_chunk; |
c0ebfdc3 DZF |
1565 | |
1566 | /* is it in the reserved region? */ | |
560f2c23 | 1567 | if (pcpu_addr_in_chunk(pcpu_reserved_chunk, addr)) |
c0ebfdc3 | 1568 | return pcpu_reserved_chunk; |
88999a89 TH |
1569 | |
1570 | /* | |
1571 | * The address is relative to unit0 which might be unused and | |
1572 | * thus unmapped. Offset the address to the unit space of the | |
1573 | * current processor before looking it up in the vmalloc | |
1574 | * space. Note that any possible cpu id can be used here, so | |
1575 | * there's no need to worry about preemption or cpu hotplug. | |
1576 | */ | |
1577 | addr += pcpu_unit_offsets[raw_smp_processor_id()]; | |
9f645532 | 1578 | return pcpu_get_page_chunk(pcpu_addr_to_page(addr)); |
88999a89 TH |
1579 | } |
1580 | ||
3c7be18a RG |
1581 | #ifdef CONFIG_MEMCG_KMEM |
1582 | static enum pcpu_chunk_type pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp, | |
1583 | struct obj_cgroup **objcgp) | |
1584 | { | |
1585 | struct obj_cgroup *objcg; | |
1586 | ||
279c3393 | 1587 | if (!memcg_kmem_enabled() || !(gfp & __GFP_ACCOUNT)) |
3c7be18a RG |
1588 | return PCPU_CHUNK_ROOT; |
1589 | ||
1590 | objcg = get_obj_cgroup_from_current(); | |
1591 | if (!objcg) | |
1592 | return PCPU_CHUNK_ROOT; | |
1593 | ||
1594 | if (obj_cgroup_charge(objcg, gfp, size * num_possible_cpus())) { | |
1595 | obj_cgroup_put(objcg); | |
1596 | return PCPU_FAIL_ALLOC; | |
1597 | } | |
1598 | ||
1599 | *objcgp = objcg; | |
1600 | return PCPU_CHUNK_MEMCG; | |
1601 | } | |
1602 | ||
1603 | static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg, | |
1604 | struct pcpu_chunk *chunk, int off, | |
1605 | size_t size) | |
1606 | { | |
1607 | if (!objcg) | |
1608 | return; | |
1609 | ||
1610 | if (chunk) { | |
1611 | chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = objcg; | |
772616b0 RG |
1612 | |
1613 | rcu_read_lock(); | |
1614 | mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B, | |
1615 | size * num_possible_cpus()); | |
1616 | rcu_read_unlock(); | |
3c7be18a RG |
1617 | } else { |
1618 | obj_cgroup_uncharge(objcg, size * num_possible_cpus()); | |
1619 | obj_cgroup_put(objcg); | |
1620 | } | |
1621 | } | |
1622 | ||
1623 | static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size) | |
1624 | { | |
1625 | struct obj_cgroup *objcg; | |
1626 | ||
1627 | if (!pcpu_is_memcg_chunk(pcpu_chunk_type(chunk))) | |
1628 | return; | |
1629 | ||
1630 | objcg = chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT]; | |
1631 | chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = NULL; | |
1632 | ||
1633 | obj_cgroup_uncharge(objcg, size * num_possible_cpus()); | |
1634 | ||
772616b0 RG |
1635 | rcu_read_lock(); |
1636 | mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B, | |
1637 | -(size * num_possible_cpus())); | |
1638 | rcu_read_unlock(); | |
1639 | ||
3c7be18a RG |
1640 | obj_cgroup_put(objcg); |
1641 | } | |
1642 | ||
1643 | #else /* CONFIG_MEMCG_KMEM */ | |
1644 | static enum pcpu_chunk_type | |
1645 | pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp, struct obj_cgroup **objcgp) | |
1646 | { | |
1647 | return PCPU_CHUNK_ROOT; | |
1648 | } | |
1649 | ||
1650 | static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg, | |
1651 | struct pcpu_chunk *chunk, int off, | |
1652 | size_t size) | |
1653 | { | |
1654 | } | |
1655 | ||
1656 | static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size) | |
1657 | { | |
1658 | } | |
1659 | #endif /* CONFIG_MEMCG_KMEM */ | |
1660 | ||
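/*
 * Usage sketch (hypothetical caller, not part of this file): passing
 * __GFP_ACCOUNT is what routes an allocation through the memcg hooks
 * above and into the memcg-aware chunk set:
 *
 *	struct my_stats { u64 events; };	// hypothetical type
 *	struct my_stats __percpu *p;
 *
 *	p = alloc_percpu_gfp(struct my_stats, GFP_KERNEL | __GFP_ACCOUNT);
 *	if (p)
 *		this_cpu_inc(p->events);
 */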
fbf59bc9 | 1661 | /** |
edcb4639 | 1662 | * pcpu_alloc - the percpu allocator |
cae3aeb8 | 1663 | * @size: size of area to allocate in bytes |
fbf59bc9 | 1664 | * @align: alignment of area (max PAGE_SIZE) |
edcb4639 | 1665 | * @reserved: allocate from the reserved chunk if available |
5835d96e | 1666 | * @gfp: allocation flags |
fbf59bc9 | 1667 | * |
5835d96e | 1668 | * Allocate percpu area of @size bytes aligned at @align. If @gfp doesn't |
0ea7eeec DB |
1669 | * contain %GFP_KERNEL, the allocation is atomic. If @gfp has __GFP_NOWARN |
1670 | * then no warning will be triggered on invalid or failed allocation | |
1671 | * requests. | |
fbf59bc9 TH |
1672 | * |
1673 | * RETURNS: | |
1674 | * Percpu pointer to the allocated area on success, NULL on failure. | |
1675 | */ | |
5835d96e TH |
1676 | static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved, |
1677 | gfp_t gfp) | |
fbf59bc9 | 1678 | { |
28307d93 FM |
1679 | gfp_t pcpu_gfp; |
1680 | bool is_atomic; | |
1681 | bool do_warn; | |
3c7be18a RG |
1682 | enum pcpu_chunk_type type; |
1683 | struct list_head *pcpu_slot; | |
1684 | struct obj_cgroup *objcg = NULL; | |
f2badb0c | 1685 | static int warn_limit = 10; |
8744d859 | 1686 | struct pcpu_chunk *chunk, *next; |
f2badb0c | 1687 | const char *err; |
40064aec | 1688 | int slot, off, cpu, ret; |
403a91b1 | 1689 | unsigned long flags; |
f528f0b8 | 1690 | void __percpu *ptr; |
40064aec | 1691 | size_t bits, bit_align; |
fbf59bc9 | 1692 | |
28307d93 FM |
1693 | gfp = current_gfp_context(gfp); |
1694 | /* whitelisted flags that can be passed to the backing allocators */ | |
1695 | pcpu_gfp = gfp & (GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN); | |
1696 | is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL; | |
1697 | do_warn = !(gfp & __GFP_NOWARN); | |
1698 | ||
723ad1d9 | 1699 | /* |
40064aec DZF |
1700 | * There is now a minimum allocation size of PCPU_MIN_ALLOC_SIZE, |
1701 | * therefore alignment must be a minimum of that many bytes. | |
1702 | * An allocation may suffer up to PCPU_MIN_ALLOC_SIZE - 1 bytes of |
1703 | * internal fragmentation from rounding its size up to that granularity. |
723ad1d9 | 1704 | */ |
d2f3c384 DZF |
1705 | if (unlikely(align < PCPU_MIN_ALLOC_SIZE)) |
1706 | align = PCPU_MIN_ALLOC_SIZE; | |
723ad1d9 | 1707 | |
d2f3c384 | 1708 | size = ALIGN(size, PCPU_MIN_ALLOC_SIZE); |
40064aec DZF |
1709 | bits = size >> PCPU_MIN_ALLOC_SHIFT; |
1710 | bit_align = align >> PCPU_MIN_ALLOC_SHIFT; | |
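	/*
	 * Example (illustrative): a request of size 10 and align 8 becomes
	 * size = ALIGN(10, 4) = 12, bits = 3, bit_align = 2, i.e. three
	 * minimum-size fragments placed on a two-fragment boundary
	 * (assuming PCPU_MIN_ALLOC_SIZE == 4).
	 */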
2f69fa82 | 1711 | |
3ca45a46 | 1712 | if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE || |
1713 | !is_power_of_2(align))) { | |
0ea7eeec | 1714 | WARN(do_warn, "illegal size (%zu) or align (%zu) for percpu allocation\n", |
756a025f | 1715 | size, align); |
fbf59bc9 TH |
1716 | return NULL; |
1717 | } | |
1718 | ||
3c7be18a RG |
1719 | type = pcpu_memcg_pre_alloc_hook(size, gfp, &objcg); |
1720 | if (unlikely(type == PCPU_FAIL_ALLOC)) | |
1721 | return NULL; | |
1722 | pcpu_slot = pcpu_chunk_list(type); | |
1723 | ||
f52ba1fe KT |
1724 | if (!is_atomic) { |
1725 | /* | |
1726 | * pcpu_balance_workfn() allocates memory under this mutex, | |
1727 | * and it may wait for memory reclaim. Allow current task | |
1728 | * to become OOM victim, in case of memory pressure. | |
1729 | */ | |
3c7be18a | 1730 | if (gfp & __GFP_NOFAIL) { |
f52ba1fe | 1731 | mutex_lock(&pcpu_alloc_mutex); |
3c7be18a RG |
1732 | } else if (mutex_lock_killable(&pcpu_alloc_mutex)) { |
1733 | pcpu_memcg_post_alloc_hook(objcg, NULL, 0, size); | |
f52ba1fe | 1734 | return NULL; |
3c7be18a | 1735 | } |
f52ba1fe | 1736 | } |
6710e594 | 1737 | |
403a91b1 | 1738 | spin_lock_irqsave(&pcpu_lock, flags); |
fbf59bc9 | 1739 | |
edcb4639 TH |
1740 | /* serve reserved allocations from the reserved chunk if available */ |
1741 | if (reserved && pcpu_reserved_chunk) { | |
1742 | chunk = pcpu_reserved_chunk; | |
833af842 | 1743 | |
40064aec DZF |
1744 | off = pcpu_find_block_fit(chunk, bits, bit_align, is_atomic); |
1745 | if (off < 0) { | |
833af842 | 1746 | err = "alloc from reserved chunk failed"; |
ccea34b5 | 1747 | goto fail_unlock; |
f2badb0c | 1748 | } |
833af842 | 1749 | |
40064aec | 1750 | off = pcpu_alloc_area(chunk, bits, bit_align, off); |
edcb4639 TH |
1751 | if (off >= 0) |
1752 | goto area_found; | |
833af842 | 1753 | |
f2badb0c | 1754 | err = "alloc from reserved chunk failed"; |
ccea34b5 | 1755 | goto fail_unlock; |
edcb4639 TH |
1756 | } |
1757 | ||
ccea34b5 | 1758 | restart: |
edcb4639 | 1759 | /* search through normal chunks */ |
fbf59bc9 | 1760 | for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) { |
8744d859 | 1761 | list_for_each_entry_safe(chunk, next, &pcpu_slot[slot], list) { |
40064aec DZF |
1762 | off = pcpu_find_block_fit(chunk, bits, bit_align, |
1763 | is_atomic); | |
8744d859 DZ |
1764 | if (off < 0) { |
1765 | if (slot < PCPU_SLOT_FAIL_THRESHOLD) | |
1766 | pcpu_chunk_move(chunk, 0); | |
fbf59bc9 | 1767 | continue; |
8744d859 | 1768 | } |
ccea34b5 | 1769 | |
40064aec | 1770 | off = pcpu_alloc_area(chunk, bits, bit_align, off); |
fbf59bc9 TH |
1771 | if (off >= 0) |
1772 | goto area_found; | |
40064aec | 1773 | |
fbf59bc9 TH |
1774 | } |
1775 | } | |
1776 | ||
403a91b1 | 1777 | spin_unlock_irqrestore(&pcpu_lock, flags); |
ccea34b5 | 1778 | |
b38d08f3 TH |
1779 | /* |
1780 | * No space left. Create a new chunk. We don't want multiple | |
1781 | * tasks to create chunks simultaneously. Serialize and create iff | |
1782 | * there's still no empty chunk after grabbing the mutex. | |
1783 | */ | |
11df02bf DZ |
1784 | if (is_atomic) { |
1785 | err = "atomic alloc failed, no space left"; | |
5835d96e | 1786 | goto fail; |
11df02bf | 1787 | } |
5835d96e | 1788 | |
b38d08f3 | 1789 | if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) { |
3c7be18a | 1790 | chunk = pcpu_create_chunk(type, pcpu_gfp); |
b38d08f3 TH |
1791 | if (!chunk) { |
1792 | err = "failed to allocate new chunk"; | |
1793 | goto fail; | |
1794 | } | |
1795 | ||
1796 | spin_lock_irqsave(&pcpu_lock, flags); | |
1797 | pcpu_chunk_relocate(chunk, -1); | |
1798 | } else { | |
1799 | spin_lock_irqsave(&pcpu_lock, flags); | |
f2badb0c | 1800 | } |
ccea34b5 | 1801 | |
ccea34b5 | 1802 | goto restart; |
fbf59bc9 TH |
1803 | |
1804 | area_found: | |
30a5b536 | 1805 | pcpu_stats_area_alloc(chunk, size); |
403a91b1 | 1806 | spin_unlock_irqrestore(&pcpu_lock, flags); |
ccea34b5 | 1807 | |
dca49645 | 1808 | /* populate if not all pages are already there */ |
5835d96e | 1809 | if (!is_atomic) { |
e837dfde | 1810 | unsigned int page_start, page_end, rs, re; |
dca49645 | 1811 | |
e04d3208 TH |
1812 | page_start = PFN_DOWN(off); |
1813 | page_end = PFN_UP(off + size); | |
b38d08f3 | 1814 | |
e837dfde DZ |
1815 | bitmap_for_each_clear_region(chunk->populated, rs, re, |
1816 | page_start, page_end) { | |
e04d3208 TH |
1817 | WARN_ON(chunk->immutable); |
1818 | ||
554fef1c | 1819 | ret = pcpu_populate_chunk(chunk, rs, re, pcpu_gfp); |
e04d3208 TH |
1820 | |
1821 | spin_lock_irqsave(&pcpu_lock, flags); | |
1822 | if (ret) { | |
40064aec | 1823 | pcpu_free_area(chunk, off); |
e04d3208 TH |
1824 | err = "failed to populate"; |
1825 | goto fail_unlock; | |
1826 | } | |
b239f7da | 1827 | pcpu_chunk_populated(chunk, rs, re); |
e04d3208 | 1828 | spin_unlock_irqrestore(&pcpu_lock, flags); |
dca49645 | 1829 | } |
fbf59bc9 | 1830 | |
e04d3208 TH |
1831 | mutex_unlock(&pcpu_alloc_mutex); |
1832 | } | |
ccea34b5 | 1833 | |
1a4d7607 TH |
1834 | if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW) |
1835 | pcpu_schedule_balance_work(); | |
1836 | ||
dca49645 TH |
1837 | /* clear the areas and return address relative to base address */ |
1838 | for_each_possible_cpu(cpu) | |
1839 | memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size); | |
1840 | ||
f528f0b8 | 1841 | ptr = __addr_to_pcpu_ptr(chunk->base_addr + off); |
8a8c35fa | 1842 | kmemleak_alloc_percpu(ptr, size, gfp); |
df95e795 DZ |
1843 | |
1844 | trace_percpu_alloc_percpu(reserved, is_atomic, size, align, | |
1845 | chunk->base_addr, off, ptr); | |
1846 | ||
3c7be18a RG |
1847 | pcpu_memcg_post_alloc_hook(objcg, chunk, off, size); |
1848 | ||
f528f0b8 | 1849 | return ptr; |
ccea34b5 TH |
1850 | |
1851 | fail_unlock: | |
403a91b1 | 1852 | spin_unlock_irqrestore(&pcpu_lock, flags); |
b38d08f3 | 1853 | fail: |
df95e795 DZ |
1854 | trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align); |
1855 | ||
0ea7eeec | 1856 | if (!is_atomic && do_warn && warn_limit) { |
870d4b12 | 1857 | pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n", |
598d8091 | 1858 | size, align, is_atomic, err); |
f2badb0c TH |
1859 | dump_stack(); |
1860 | if (!--warn_limit) | |
870d4b12 | 1861 | pr_info("limit reached, disable warning\n"); |
f2badb0c | 1862 | } |
1a4d7607 TH |
1863 | if (is_atomic) { |
1864 | /* see the flag handling in pcpu_balance_workfn() */ |
1865 | pcpu_atomic_alloc_failed = true; | |
1866 | pcpu_schedule_balance_work(); | |
6710e594 TH |
1867 | } else { |
1868 | mutex_unlock(&pcpu_alloc_mutex); | |
1a4d7607 | 1869 | } |
3c7be18a RG |
1870 | |
1871 | pcpu_memcg_post_alloc_hook(objcg, NULL, 0, size); | |
1872 | ||
ccea34b5 | 1873 | return NULL; |
fbf59bc9 | 1874 | } |
edcb4639 TH |
1875 | |
1876 | /** | |
5835d96e | 1877 | * __alloc_percpu_gfp - allocate dynamic percpu area |
edcb4639 TH |
1878 | * @size: size of area to allocate in bytes |
1879 | * @align: alignment of area (max PAGE_SIZE) | |
5835d96e | 1880 | * @gfp: allocation flags |
edcb4639 | 1881 | * |
5835d96e TH |
1882 | * Allocate zero-filled percpu area of @size bytes aligned at @align. If |
1883 | * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can | |
0ea7eeec DB |
1884 | * be called from any context but is a lot more likely to fail. If @gfp |
1885 | * has __GFP_NOWARN then no warning will be triggered on invalid or failed | |
1886 | * allocation requests. | |
ccea34b5 | 1887 | * |
edcb4639 TH |
1888 | * RETURNS: |
1889 | * Percpu pointer to the allocated area on success, NULL on failure. | |
1890 | */ | |
5835d96e TH |
1891 | void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp) |
1892 | { | |
1893 | return pcpu_alloc(size, align, false, gfp); | |
1894 | } | |
1895 | EXPORT_SYMBOL_GPL(__alloc_percpu_gfp); | |
1896 | ||
1897 | /** | |
1898 | * __alloc_percpu - allocate dynamic percpu area | |
1899 | * @size: size of area to allocate in bytes | |
1900 | * @align: alignment of area (max PAGE_SIZE) | |
1901 | * | |
1902 | * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL). | |
1903 | */ | |
43cf38eb | 1904 | void __percpu *__alloc_percpu(size_t size, size_t align) |
edcb4639 | 1905 | { |
5835d96e | 1906 | return pcpu_alloc(size, align, false, GFP_KERNEL); |
edcb4639 | 1907 | } |
fbf59bc9 TH |
1908 | EXPORT_SYMBOL_GPL(__alloc_percpu); |
1909 | ||
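/*
 * Usage sketch (hypothetical caller): a dynamic percpu counter that is
 * updated locklessly on the local cpu and summed over all possible cpus:
 *
 *	int __percpu *cnt = alloc_percpu(int);
 *	int cpu, sum = 0;
 *
 *	if (!cnt)
 *		return -ENOMEM;
 *	this_cpu_inc(*cnt);
 *	for_each_possible_cpu(cpu)
 *		sum += *per_cpu_ptr(cnt, cpu);
 *	free_percpu(cnt);
 */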
edcb4639 TH |
1910 | /** |
1911 | * __alloc_reserved_percpu - allocate reserved percpu area | |
1912 | * @size: size of area to allocate in bytes | |
1913 | * @align: alignment of area (max PAGE_SIZE) | |
1914 | * | |
9329ba97 TH |
1915 | * Allocate zero-filled percpu area of @size bytes aligned at @align |
1916 | * from reserved percpu area if arch has set it up; otherwise, | |
1917 | * allocation is served from the same dynamic area. Might sleep. | |
1918 | * Might trigger writeouts. | |
edcb4639 | 1919 | * |
ccea34b5 TH |
1920 | * CONTEXT: |
1921 | * Does GFP_KERNEL allocation. | |
1922 | * | |
edcb4639 TH |
1923 | * RETURNS: |
1924 | * Percpu pointer to the allocated area on success, NULL on failure. | |
1925 | */ | |
43cf38eb | 1926 | void __percpu *__alloc_reserved_percpu(size_t size, size_t align) |
edcb4639 | 1927 | { |
5835d96e | 1928 | return pcpu_alloc(size, align, true, GFP_KERNEL); |
edcb4639 TH |
1929 | } |
1930 | ||
a56dbddf | 1931 | /** |
3c7be18a RG |
1932 | * __pcpu_balance_workfn - manage the amount of free chunks and populated pages |
1933 | * @type: chunk type | |
a56dbddf | 1934 | * |
47504ee0 DZ |
1935 | * Reclaim all fully free chunks except for the first one. This is also |
1936 | * responsible for maintaining the pool of empty populated pages. However, | |
1937 | * it is possible that this is called when physical memory is scarce, which |
1938 | * could trigger the OOM killer. We should avoid populating until an actual |
1939 | * allocation fails, since pending requests may still be serviceable from |
1940 | * regions that are already backed. |
a56dbddf | 1941 | */ |
3c7be18a | 1942 | static void __pcpu_balance_workfn(enum pcpu_chunk_type type) |
fbf59bc9 | 1943 | { |
47504ee0 | 1944 | /* gfp flags passed to underlying allocators */ |
554fef1c | 1945 | const gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN; |
fe6bd8c3 | 1946 | LIST_HEAD(to_free); |
3c7be18a | 1947 | struct list_head *pcpu_slot = pcpu_chunk_list(type); |
fe6bd8c3 | 1948 | struct list_head *free_head = &pcpu_slot[pcpu_nr_slots - 1]; |
a56dbddf | 1949 | struct pcpu_chunk *chunk, *next; |
1a4d7607 | 1950 | int slot, nr_to_pop, ret; |
a56dbddf | 1951 | |
1a4d7607 TH |
1952 | /* |
1953 | * There's no reason to keep around multiple unused chunks and VM | |
1954 | * areas can be scarce. Destroy all free chunks except for one. | |
1955 | */ | |
ccea34b5 TH |
1956 | mutex_lock(&pcpu_alloc_mutex); |
1957 | spin_lock_irq(&pcpu_lock); | |
a56dbddf | 1958 | |
fe6bd8c3 | 1959 | list_for_each_entry_safe(chunk, next, free_head, list) { |
a56dbddf TH |
1960 | WARN_ON(chunk->immutable); |
1961 | ||
1962 | /* spare the first one */ | |
fe6bd8c3 | 1963 | if (chunk == list_first_entry(free_head, struct pcpu_chunk, list)) |
a56dbddf TH |
1964 | continue; |
1965 | ||
fe6bd8c3 | 1966 | list_move(&chunk->list, &to_free); |
a56dbddf TH |
1967 | } |
1968 | ||
ccea34b5 | 1969 | spin_unlock_irq(&pcpu_lock); |
a56dbddf | 1970 | |
fe6bd8c3 | 1971 | list_for_each_entry_safe(chunk, next, &to_free, list) { |
e837dfde | 1972 | unsigned int rs, re; |
dca49645 | 1973 | |
e837dfde DZ |
1974 | bitmap_for_each_set_region(chunk->populated, rs, re, 0, |
1975 | chunk->nr_pages) { | |
a93ace48 | 1976 | pcpu_depopulate_chunk(chunk, rs, re); |
b539b87f TH |
1977 | spin_lock_irq(&pcpu_lock); |
1978 | pcpu_chunk_depopulated(chunk, rs, re); | |
1979 | spin_unlock_irq(&pcpu_lock); | |
a93ace48 | 1980 | } |
6081089f | 1981 | pcpu_destroy_chunk(chunk); |
accd4f36 | 1982 | cond_resched(); |
a56dbddf | 1983 | } |
971f3918 | 1984 | |
1a4d7607 TH |
1985 | /* |
1986 | * Ensure there are certain number of free populated pages for | |
1987 | * atomic allocs. Fill up from the most packed so that atomic | |
1988 | * allocs don't increase fragmentation. If atomic allocation | |
1989 | * failed previously, always populate the maximum amount. This | |
1990 | * should prevent atomic allocs larger than PAGE_SIZE from keeping | |
1991 | * failing indefinitely; however, large atomic allocs are not | |
1992 | * something we support properly and can be highly unreliable and | |
1993 | * inefficient. | |
1994 | */ | |
1995 | retry_pop: | |
1996 | if (pcpu_atomic_alloc_failed) { | |
1997 | nr_to_pop = PCPU_EMPTY_POP_PAGES_HIGH; | |
1998 | /* best effort anyway, don't worry about synchronization */ | |
1999 | pcpu_atomic_alloc_failed = false; | |
2000 | } else { | |
2001 | nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH - | |
2002 | pcpu_nr_empty_pop_pages, | |
2003 | 0, PCPU_EMPTY_POP_PAGES_HIGH); | |
2004 | } | |
2005 | ||
2006 | for (slot = pcpu_size_to_slot(PAGE_SIZE); slot < pcpu_nr_slots; slot++) { | |
e837dfde | 2007 | unsigned int nr_unpop = 0, rs, re; |
1a4d7607 TH |
2008 | |
2009 | if (!nr_to_pop) | |
2010 | break; | |
2011 | ||
2012 | spin_lock_irq(&pcpu_lock); | |
2013 | list_for_each_entry(chunk, &pcpu_slot[slot], list) { | |
8ab16c43 | 2014 | nr_unpop = chunk->nr_pages - chunk->nr_populated; |
1a4d7607 TH |
2015 | if (nr_unpop) |
2016 | break; | |
2017 | } | |
2018 | spin_unlock_irq(&pcpu_lock); | |
2019 | ||
2020 | if (!nr_unpop) | |
2021 | continue; | |
2022 | ||
2023 | /* @chunk can't go away while pcpu_alloc_mutex is held */ | |
e837dfde DZ |
2024 | bitmap_for_each_clear_region(chunk->populated, rs, re, 0, |
2025 | chunk->nr_pages) { | |
2026 | int nr = min_t(int, re - rs, nr_to_pop); | |
1a4d7607 | 2027 | |
47504ee0 | 2028 | ret = pcpu_populate_chunk(chunk, rs, rs + nr, gfp); |
1a4d7607 TH |
2029 | if (!ret) { |
2030 | nr_to_pop -= nr; | |
2031 | spin_lock_irq(&pcpu_lock); | |
b239f7da | 2032 | pcpu_chunk_populated(chunk, rs, rs + nr); |
1a4d7607 TH |
2033 | spin_unlock_irq(&pcpu_lock); |
2034 | } else { | |
2035 | nr_to_pop = 0; | |
2036 | } | |
2037 | ||
2038 | if (!nr_to_pop) | |
2039 | break; | |
2040 | } | |
2041 | } | |
2042 | ||
2043 | if (nr_to_pop) { | |
2044 | /* ran out of chunks to populate, create a new one and retry */ | |
3c7be18a | 2045 | chunk = pcpu_create_chunk(type, gfp); |
1a4d7607 TH |
2046 | if (chunk) { |
2047 | spin_lock_irq(&pcpu_lock); | |
2048 | pcpu_chunk_relocate(chunk, -1); | |
2049 | spin_unlock_irq(&pcpu_lock); | |
2050 | goto retry_pop; | |
2051 | } | |
2052 | } | |
2053 | ||
971f3918 | 2054 | mutex_unlock(&pcpu_alloc_mutex); |
fbf59bc9 TH |
2055 | } |
2056 | ||
3c7be18a RG |
2057 | /** |
2058 | * pcpu_balance_workfn - manage the amount of free chunks and populated pages | |
2059 | * @work: unused | |
2060 | * | |
2061 | * Call __pcpu_balance_workfn() for each chunk type. | |
2062 | */ | |
2063 | static void pcpu_balance_workfn(struct work_struct *work) | |
2064 | { | |
2065 | enum pcpu_chunk_type type; | |
2066 | ||
2067 | for (type = 0; type < PCPU_NR_CHUNK_TYPES; type++) | |
2068 | __pcpu_balance_workfn(type); | |
2069 | } | |
2070 | ||
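/*
 * Worked example (illustrative) for the nr_to_pop calculation above:
 * assuming PCPU_EMPTY_POP_PAGES_HIGH == 4 and pcpu_nr_empty_pop_pages == 1,
 * nr_to_pop = clamp(4 - 1, 0, 4) = 3, i.e. the balancer tops the pool of
 * empty populated pages back up to the high watermark so that atomic
 * allocations can be served without faulting pages in.
 */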
fbf59bc9 TH |
2071 | /** |
2072 | * free_percpu - free percpu area | |
2073 | * @ptr: pointer to area to free | |
2074 | * | |
ccea34b5 TH |
2075 | * Free percpu area @ptr. |
2076 | * | |
2077 | * CONTEXT: | |
2078 | * Can be called from atomic context. | |
fbf59bc9 | 2079 | */ |
43cf38eb | 2080 | void free_percpu(void __percpu *ptr) |
fbf59bc9 | 2081 | { |
129182e5 | 2082 | void *addr; |
fbf59bc9 | 2083 | struct pcpu_chunk *chunk; |
ccea34b5 | 2084 | unsigned long flags; |
3c7be18a | 2085 | int size, off; |
198790d9 | 2086 | bool need_balance = false; |
3c7be18a | 2087 | struct list_head *pcpu_slot; |
fbf59bc9 TH |
2088 | |
2089 | if (!ptr) | |
2090 | return; | |
2091 | ||
f528f0b8 CM |
2092 | kmemleak_free_percpu(ptr); |
2093 | ||
129182e5 AM |
2094 | addr = __pcpu_ptr_to_addr(ptr); |
2095 | ||
ccea34b5 | 2096 | spin_lock_irqsave(&pcpu_lock, flags); |
fbf59bc9 TH |
2097 | |
2098 | chunk = pcpu_chunk_addr_search(addr); | |
bba174f5 | 2099 | off = addr - chunk->base_addr; |
fbf59bc9 | 2100 | |
3c7be18a RG |
2101 | size = pcpu_free_area(chunk, off); |
2102 | ||
2103 | pcpu_slot = pcpu_chunk_list(pcpu_chunk_type(chunk)); | |
2104 | ||
2105 | pcpu_memcg_free_hook(chunk, off, size); | |
fbf59bc9 | 2106 | |
a56dbddf | 2107 | /* if there is more than one fully free chunk, wake up the grim reaper */ |
40064aec | 2108 | if (chunk->free_bytes == pcpu_unit_size) { |
fbf59bc9 TH |
2109 | struct pcpu_chunk *pos; |
2110 | ||
a56dbddf | 2111 | list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list) |
fbf59bc9 | 2112 | if (pos != chunk) { |
198790d9 | 2113 | need_balance = true; |
fbf59bc9 TH |
2114 | break; |
2115 | } | |
2116 | } | |
2117 | ||
df95e795 DZ |
2118 | trace_percpu_free_percpu(chunk->base_addr, off, ptr); |
2119 | ||
ccea34b5 | 2120 | spin_unlock_irqrestore(&pcpu_lock, flags); |
198790d9 JS |
2121 | |
2122 | if (need_balance) | |
2123 | pcpu_schedule_balance_work(); | |
fbf59bc9 TH |
2124 | } |
2125 | EXPORT_SYMBOL_GPL(free_percpu); | |
2126 | ||
383776fa | 2127 | bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr) |
10fad5e4 | 2128 | { |
bbddff05 | 2129 | #ifdef CONFIG_SMP |
10fad5e4 TH |
2130 | const size_t static_size = __per_cpu_end - __per_cpu_start; |
2131 | void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr); | |
2132 | unsigned int cpu; | |
2133 | ||
2134 | for_each_possible_cpu(cpu) { | |
2135 | void *start = per_cpu_ptr(base, cpu); | |
383776fa | 2136 | void *va = (void *)addr; |
10fad5e4 | 2137 | |
383776fa | 2138 | if (va >= start && va < start + static_size) { |
8ce371f9 | 2139 | if (can_addr) { |
383776fa | 2140 | *can_addr = (unsigned long) (va - start); |
8ce371f9 PZ |
2141 | *can_addr += (unsigned long) |
2142 | per_cpu_ptr(base, get_boot_cpu_id()); | |
2143 | } | |
10fad5e4 | 2144 | return true; |
383776fa TG |
2145 | } |
2146 | } | |
bbddff05 TH |
2147 | #endif |
2148 | /* on UP, can't distinguish from other static vars, always false */ | |
10fad5e4 TH |
2149 | return false; |
2150 | } | |
2151 | ||
383776fa TG |
2152 | /** |
2153 | * is_kernel_percpu_address - test whether address is from static percpu area | |
2154 | * @addr: address to test | |
2155 | * | |
2156 | * Test whether @addr belongs to the in-kernel static percpu area. Module |
2157 | * static percpu areas are not considered. For those, use | |
2158 | * is_module_percpu_address(). | |
2159 | * | |
2160 | * RETURNS: | |
2161 | * %true if @addr is from in-kernel static percpu area, %false otherwise. | |
2162 | */ | |
2163 | bool is_kernel_percpu_address(unsigned long addr) | |
2164 | { | |
2165 | return __is_kernel_percpu_address(addr, NULL); | |
2166 | } | |
2167 | ||
3b034b0d VG |
2168 | /** |
2169 | * per_cpu_ptr_to_phys - convert translated percpu address to physical address | |
2170 | * @addr: the address to be converted to physical address | |
2171 | * | |
2172 | * Given @addr, which is a dereferenceable address obtained via one of the |
2173 | * percpu access macros, this function translates it into its physical | |
2174 | * address. The caller is responsible for ensuring @addr stays valid | |
2175 | * until this function finishes. | |
2176 | * | |
67589c71 DY |
2177 | * percpu allocator has special setup for the first chunk, which currently |
2178 | * supports either embedding in linear address space or vmalloc mapping, | |
2179 | * and, from the second one, the backing allocator (currently either vm or | |
2180 | * km) provides translation. | |
2181 | * | |
bffc4375 | 2182 | * The addr could be translated without checking whether it falls into the |
67589c71 DY |
2183 | * first chunk, but the current code better reflects how the percpu |
2184 | * allocator actually works, and the verification can catch bugs both in |
2185 | * the percpu allocator itself and in per_cpu_ptr_to_phys() callers. So we |
2186 | * keep the current code. |
2187 | * | |
3b034b0d VG |
2188 | * RETURNS: |
2189 | * The physical address for @addr. | |
2190 | */ | |
2191 | phys_addr_t per_cpu_ptr_to_phys(void *addr) | |
2192 | { | |
9983b6f0 TH |
2193 | void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr); |
2194 | bool in_first_chunk = false; | |
a855b84c | 2195 | unsigned long first_low, first_high; |
9983b6f0 TH |
2196 | unsigned int cpu; |
2197 | ||
2198 | /* | |
a855b84c | 2199 | * The following test on unit_low/high isn't strictly |
9983b6f0 TH |
2200 | * necessary but will speed up lookups of addresses which |
2201 | * aren't in the first chunk. | |
c0ebfdc3 DZF |
2202 | * |
2203 | * The address check is against full chunk sizes. pcpu_base_addr | |
2204 | * points to the beginning of the first chunk including the | |
2205 | * static region. Assumes good intent as the first chunk may | |
2206 | * not be full (i.e. < pcpu_unit_pages in size). |
9983b6f0 | 2207 | */ |
c0ebfdc3 DZF |
2208 | first_low = (unsigned long)pcpu_base_addr + |
2209 | pcpu_unit_page_offset(pcpu_low_unit_cpu, 0); | |
2210 | first_high = (unsigned long)pcpu_base_addr + | |
2211 | pcpu_unit_page_offset(pcpu_high_unit_cpu, pcpu_unit_pages); | |
a855b84c TH |
2212 | if ((unsigned long)addr >= first_low && |
2213 | (unsigned long)addr < first_high) { | |
9983b6f0 TH |
2214 | for_each_possible_cpu(cpu) { |
2215 | void *start = per_cpu_ptr(base, cpu); | |
2216 | ||
2217 | if (addr >= start && addr < start + pcpu_unit_size) { | |
2218 | in_first_chunk = true; | |
2219 | break; | |
2220 | } | |
2221 | } | |
2222 | } | |
2223 | ||
2224 | if (in_first_chunk) { | |
eac522ef | 2225 | if (!is_vmalloc_addr(addr)) |
020ec653 TH |
2226 | return __pa(addr); |
2227 | else | |
9f57bd4d ES |
2228 | return page_to_phys(vmalloc_to_page(addr)) + |
2229 | offset_in_page(addr); | |
020ec653 | 2230 | } else |
9f57bd4d ES |
2231 | return page_to_phys(pcpu_addr_to_page(addr)) + |
2232 | offset_in_page(addr); | |
3b034b0d VG |
2233 | } |
2234 | ||
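/*
 * Usage sketch (hypothetical caller): translating a percpu pointer for a
 * given cpu into a physical address, e.g. to hand a percpu buffer to a
 * device:
 *
 *	void *va = per_cpu_ptr(buf, cpu);	// buf is void __percpu *
 *	phys_addr_t pa = per_cpu_ptr_to_phys(va);
 */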
fbf59bc9 | 2235 | /** |
fd1e8a1f TH |
2236 | * pcpu_alloc_alloc_info - allocate percpu allocation info |
2237 | * @nr_groups: the number of groups | |
2238 | * @nr_units: the number of units | |
2239 | * | |
2240 | * Allocate ai which is large enough for @nr_groups groups containing | |
2241 | * @nr_units units. The returned ai's groups[0].cpu_map points to the | |
2242 | * cpu_map array which is long enough for @nr_units and filled with | |
2243 | * NR_CPUS. It's the caller's responsibility to initialize the cpu_map |
2244 | * pointers of the other groups. |
2245 | * | |
2246 | * RETURNS: | |
2247 | * Pointer to the allocated pcpu_alloc_info on success, NULL on | |
2248 | * failure. | |
2249 | */ | |
2250 | struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups, | |
2251 | int nr_units) | |
2252 | { | |
2253 | struct pcpu_alloc_info *ai; | |
2254 | size_t base_size, ai_size; | |
2255 | void *ptr; | |
2256 | int unit; | |
2257 | ||
14d37612 | 2258 | base_size = ALIGN(struct_size(ai, groups, nr_groups), |
fd1e8a1f TH |
2259 | __alignof__(ai->groups[0].cpu_map[0])); |
2260 | ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]); | |
2261 | ||
26fb3dae | 2262 | ptr = memblock_alloc(PFN_ALIGN(ai_size), PAGE_SIZE); |
fd1e8a1f TH |
2263 | if (!ptr) |
2264 | return NULL; | |
2265 | ai = ptr; | |
2266 | ptr += base_size; | |
2267 | ||
2268 | ai->groups[0].cpu_map = ptr; | |
2269 | ||
2270 | for (unit = 0; unit < nr_units; unit++) | |
2271 | ai->groups[0].cpu_map[unit] = NR_CPUS; | |
2272 | ||
2273 | ai->nr_groups = nr_groups; | |
2274 | ai->__ai_size = PFN_ALIGN(ai_size); | |
2275 | ||
2276 | return ai; | |
2277 | } | |
2278 | ||
2279 | /** | |
2280 | * pcpu_free_alloc_info - free percpu allocation info | |
2281 | * @ai: pcpu_alloc_info to free | |
2282 | * | |
2283 | * Free @ai which was allocated by pcpu_alloc_alloc_info(). | |
2284 | */ | |
2285 | void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai) | |
2286 | { | |
999c17e3 | 2287 | memblock_free_early(__pa(ai), ai->__ai_size); |
fd1e8a1f TH |
2288 | } |
2289 | ||
fd1e8a1f TH |
2290 | /** |
2291 | * pcpu_dump_alloc_info - print out information about pcpu_alloc_info | |
2292 | * @lvl: loglevel | |
2293 | * @ai: allocation info to dump | |
2294 | * | |
2295 | * Print out information about @ai using loglevel @lvl. | |
2296 | */ | |
2297 | static void pcpu_dump_alloc_info(const char *lvl, | |
2298 | const struct pcpu_alloc_info *ai) | |
033e48fb | 2299 | { |
fd1e8a1f | 2300 | int group_width = 1, cpu_width = 1, width; |
033e48fb | 2301 | char empty_str[] = "--------"; |
fd1e8a1f TH |
2302 | int alloc = 0, alloc_end = 0; |
2303 | int group, v; | |
2304 | int upa, apl; /* units per alloc, allocs per line */ | |
2305 | ||
2306 | v = ai->nr_groups; | |
2307 | while (v /= 10) | |
2308 | group_width++; | |
033e48fb | 2309 | |
fd1e8a1f | 2310 | v = num_possible_cpus(); |
033e48fb | 2311 | while (v /= 10) |
fd1e8a1f TH |
2312 | cpu_width++; |
2313 | empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0'; | |
033e48fb | 2314 | |
fd1e8a1f TH |
2315 | upa = ai->alloc_size / ai->unit_size; |
2316 | width = upa * (cpu_width + 1) + group_width + 3; | |
2317 | apl = rounddown_pow_of_two(max(60 / width, 1)); | |
033e48fb | 2318 | |
fd1e8a1f TH |
2319 | printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu", |
2320 | lvl, ai->static_size, ai->reserved_size, ai->dyn_size, | |
2321 | ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size); | |
033e48fb | 2322 | |
fd1e8a1f TH |
2323 | for (group = 0; group < ai->nr_groups; group++) { |
2324 | const struct pcpu_group_info *gi = &ai->groups[group]; | |
2325 | int unit = 0, unit_end = 0; | |
2326 | ||
2327 | BUG_ON(gi->nr_units % upa); | |
2328 | for (alloc_end += gi->nr_units / upa; | |
2329 | alloc < alloc_end; alloc++) { | |
2330 | if (!(alloc % apl)) { | |
1170532b | 2331 | pr_cont("\n"); |
fd1e8a1f TH |
2332 | printk("%spcpu-alloc: ", lvl); |
2333 | } | |
1170532b | 2334 | pr_cont("[%0*d] ", group_width, group); |
fd1e8a1f TH |
2335 | |
2336 | for (unit_end += upa; unit < unit_end; unit++) | |
2337 | if (gi->cpu_map[unit] != NR_CPUS) | |
1170532b JP |
2338 | pr_cont("%0*d ", |
2339 | cpu_width, gi->cpu_map[unit]); | |
fd1e8a1f | 2340 | else |
1170532b | 2341 | pr_cont("%s ", empty_str); |
033e48fb | 2342 | } |
033e48fb | 2343 | } |
1170532b | 2344 | pr_cont("\n"); |
033e48fb | 2345 | } |
033e48fb | 2346 | |
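/*
 * Example output (format only; the numbers are illustrative):
 *
 *	pcpu-alloc: s98304 r8192 d28672 u135168 alloc=33*4096
 *	pcpu-alloc: [0] 0 [0] 1 [0] 2 [0] 3
 *
 * s/r/d/u are the static, reserved, dynamic and unit sizes; each
 * bracketed group number is followed by the cpus mapped to its units,
 * with dashes standing in for unused units.
 */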
fbf59bc9 | 2347 | /** |
8d408b4b | 2348 | * pcpu_setup_first_chunk - initialize the first percpu chunk |
fd1e8a1f | 2349 | * @ai: pcpu_alloc_info describing how the percpu area is shaped |
38a6be52 | 2350 | * @base_addr: mapped address |
8d408b4b TH |
2351 | * |
2352 | * Initialize the first percpu chunk which contains the kernel static | |
69ab285b | 2353 | * percpu area. This function is to be called from the arch percpu area |
38a6be52 | 2354 | * setup path. |
8d408b4b | 2355 | * |
fd1e8a1f TH |
2356 | * @ai contains all information necessary to initialize the first |
2357 | * chunk and prime the dynamic percpu allocator. | |
2358 | * | |
2359 | * @ai->static_size is the size of static percpu area. | |
2360 | * | |
2361 | * @ai->reserved_size, if non-zero, specifies the amount of bytes to | |
edcb4639 TH |
2362 | * reserve after the static area in the first chunk. This reserves |
2363 | * the first chunk such that it's available only through reserved | |
2364 | * percpu allocation. This is primarily used to serve module percpu | |
2365 | * static areas on architectures where the addressing model has | |
2366 | * limited offset range for symbol relocations to guarantee module | |
2367 | * percpu symbols fall inside the relocatable range. | |
2368 | * | |
fd1e8a1f TH |
2369 | * @ai->dyn_size determines the number of bytes available for dynamic |
2370 | * allocation in the first chunk. The area between @ai->static_size + | |
2371 | * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused. | |
6074d5b0 | 2372 | * |
fd1e8a1f TH |
2373 | * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE |
2374 | * and equal to or larger than @ai->static_size + @ai->reserved_size + | |
2375 | * @ai->dyn_size. | |
8d408b4b | 2376 | * |
fd1e8a1f TH |
2377 | * @ai->atom_size is the allocation atom size and used as alignment |
2378 | * for vm areas. | |
8d408b4b | 2379 | * |
fd1e8a1f TH |
2380 | * @ai->alloc_size is the allocation size and always multiple of |
2381 | * @ai->atom_size. This is larger than @ai->atom_size if | |
2382 | * @ai->unit_size is larger than @ai->atom_size. | |
2383 | * | |
2384 | * @ai->nr_groups and @ai->groups describe virtual memory layout of | |
2385 | * percpu areas. Units which should be colocated are put into the | |
2386 | * same group. Dynamic VM areas will be allocated according to these | |
2387 | * groupings. If @ai->nr_groups is zero, a single group containing | |
2388 | * all units is assumed. | |
8d408b4b | 2389 | * |
38a6be52 TH |
2390 | * The caller should have mapped the first chunk at @base_addr and |
2391 | * copied static data to each unit. | |
fbf59bc9 | 2392 | * |
c0ebfdc3 DZF |
2393 | * The first chunk will always contain a static and a dynamic region. |
2394 | * However, the static region is not managed by any chunk. If the first | |
2395 | * chunk also contains a reserved region, it is served by two chunks - | |
2396 | * one for the reserved region and one for the dynamic region. They | |
2397 | * share the same vm, but use offset regions in the area allocation map. | |
2398 | * The chunk serving the dynamic region is circulated in the chunk slots | |
2399 | * and available for dynamic allocation like any other chunk. | |
fbf59bc9 | 2400 | */ |
163fa234 KW |
2401 | void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, |
2402 | void *base_addr) | |
fbf59bc9 | 2403 | { |
b9c39442 | 2404 | size_t size_sum = ai->static_size + ai->reserved_size + ai->dyn_size; |
d2f3c384 | 2405 | size_t static_size, dyn_size; |
0c4169c3 | 2406 | struct pcpu_chunk *chunk; |
6563297c TH |
2407 | unsigned long *group_offsets; |
2408 | size_t *group_sizes; | |
fb435d52 | 2409 | unsigned long *unit_off; |
fbf59bc9 | 2410 | unsigned int cpu; |
fd1e8a1f TH |
2411 | int *unit_map; |
2412 | int group, unit, i; | |
c0ebfdc3 DZF |
2413 | int map_size; |
2414 | unsigned long tmp_addr; | |
f655f405 | 2415 | size_t alloc_size; |
3c7be18a | 2416 | enum pcpu_chunk_type type; |
fbf59bc9 | 2417 | |
635b75fc TH |
2418 | #define PCPU_SETUP_BUG_ON(cond) do { \ |
2419 | if (unlikely(cond)) { \ | |
870d4b12 JP |
2420 | pr_emerg("failed to initialize, %s\n", #cond); \ |
2421 | pr_emerg("cpu_possible_mask=%*pb\n", \ | |
807de073 | 2422 | cpumask_pr_args(cpu_possible_mask)); \ |
635b75fc TH |
2423 | pcpu_dump_alloc_info(KERN_EMERG, ai); \ |
2424 | BUG(); \ | |
2425 | } \ | |
2426 | } while (0) | |
2427 | ||
2f39e637 | 2428 | /* sanity checks */ |
635b75fc | 2429 | PCPU_SETUP_BUG_ON(ai->nr_groups <= 0); |
bbddff05 | 2430 | #ifdef CONFIG_SMP |
635b75fc | 2431 | PCPU_SETUP_BUG_ON(!ai->static_size); |
f09f1243 | 2432 | PCPU_SETUP_BUG_ON(offset_in_page(__per_cpu_start)); |
bbddff05 | 2433 | #endif |
635b75fc | 2434 | PCPU_SETUP_BUG_ON(!base_addr); |
f09f1243 | 2435 | PCPU_SETUP_BUG_ON(offset_in_page(base_addr)); |
635b75fc | 2436 | PCPU_SETUP_BUG_ON(ai->unit_size < size_sum); |
f09f1243 | 2437 | PCPU_SETUP_BUG_ON(offset_in_page(ai->unit_size)); |
635b75fc | 2438 | PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE); |
ca460b3c | 2439 | PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->unit_size, PCPU_BITMAP_BLOCK_SIZE)); |
099a19d9 | 2440 | PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE); |
fb29a2cc | 2441 | PCPU_SETUP_BUG_ON(!ai->dyn_size); |
d2f3c384 | 2442 | PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->reserved_size, PCPU_MIN_ALLOC_SIZE)); |
ca460b3c DZF |
2443 | PCPU_SETUP_BUG_ON(!(IS_ALIGNED(PCPU_BITMAP_BLOCK_SIZE, PAGE_SIZE) || |
2444 | IS_ALIGNED(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE))); | |
9f645532 | 2445 | PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0); |
8d408b4b | 2446 | |
6563297c | 2447 | /* process group information and build config tables accordingly */ |
f655f405 MR |
2448 | alloc_size = ai->nr_groups * sizeof(group_offsets[0]); |
2449 | group_offsets = memblock_alloc(alloc_size, SMP_CACHE_BYTES); | |
2450 | if (!group_offsets) | |
2451 | panic("%s: Failed to allocate %zu bytes\n", __func__, | |
2452 | alloc_size); | |
2453 | ||
2454 | alloc_size = ai->nr_groups * sizeof(group_sizes[0]); | |
2455 | group_sizes = memblock_alloc(alloc_size, SMP_CACHE_BYTES); | |
2456 | if (!group_sizes) | |
2457 | panic("%s: Failed to allocate %zu bytes\n", __func__, | |
2458 | alloc_size); | |
2459 | ||
2460 | alloc_size = nr_cpu_ids * sizeof(unit_map[0]); | |
2461 | unit_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES); | |
2462 | if (!unit_map) | |
2463 | panic("%s: Failed to allocate %zu bytes\n", __func__, | |
2464 | alloc_size); | |
2465 | ||
2466 | alloc_size = nr_cpu_ids * sizeof(unit_off[0]); | |
2467 | unit_off = memblock_alloc(alloc_size, SMP_CACHE_BYTES); | |
2468 | if (!unit_off) | |
2469 | panic("%s: Failed to allocate %zu bytes\n", __func__, | |
2470 | alloc_size); | |
2f39e637 | 2471 | |
fd1e8a1f | 2472 | for (cpu = 0; cpu < nr_cpu_ids; cpu++) |
ffe0d5a5 | 2473 | unit_map[cpu] = UINT_MAX; |
a855b84c TH |
2474 | |
2475 | pcpu_low_unit_cpu = NR_CPUS; | |
2476 | pcpu_high_unit_cpu = NR_CPUS; | |
2f39e637 | 2477 | |
fd1e8a1f TH |
2478 | for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) { |
2479 | const struct pcpu_group_info *gi = &ai->groups[group]; | |
2f39e637 | 2480 | |
6563297c TH |
2481 | group_offsets[group] = gi->base_offset; |
2482 | group_sizes[group] = gi->nr_units * ai->unit_size; | |
2483 | ||
fd1e8a1f TH |
2484 | for (i = 0; i < gi->nr_units; i++) { |
2485 | cpu = gi->cpu_map[i]; | |
2486 | if (cpu == NR_CPUS) | |
2487 | continue; | |
8d408b4b | 2488 | |
9f295664 | 2489 | PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids); |
635b75fc TH |
2490 | PCPU_SETUP_BUG_ON(!cpu_possible(cpu)); |
2491 | PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX); | |
fbf59bc9 | 2492 | |
fd1e8a1f | 2493 | unit_map[cpu] = unit + i; |
fb435d52 TH |
2494 | unit_off[cpu] = gi->base_offset + i * ai->unit_size; |
2495 | ||
a855b84c TH |
2496 | /* determine low/high unit_cpu */ |
2497 | if (pcpu_low_unit_cpu == NR_CPUS || | |
2498 | unit_off[cpu] < unit_off[pcpu_low_unit_cpu]) | |
2499 | pcpu_low_unit_cpu = cpu; | |
2500 | if (pcpu_high_unit_cpu == NR_CPUS || | |
2501 | unit_off[cpu] > unit_off[pcpu_high_unit_cpu]) | |
2502 | pcpu_high_unit_cpu = cpu; | |
fd1e8a1f | 2503 | } |
2f39e637 | 2504 | } |
fd1e8a1f TH |
2505 | pcpu_nr_units = unit; |
2506 | ||
2507 | for_each_possible_cpu(cpu) | |
635b75fc TH |
2508 | PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX); |
2509 | ||
2510 | /* we're done parsing the input, undefine BUG macro and dump config */ | |
2511 | #undef PCPU_SETUP_BUG_ON | |
bcbea798 | 2512 | pcpu_dump_alloc_info(KERN_DEBUG, ai); |
fd1e8a1f | 2513 | |
6563297c TH |
2514 | pcpu_nr_groups = ai->nr_groups; |
2515 | pcpu_group_offsets = group_offsets; | |
2516 | pcpu_group_sizes = group_sizes; | |
fd1e8a1f | 2517 | pcpu_unit_map = unit_map; |
fb435d52 | 2518 | pcpu_unit_offsets = unit_off; |
2f39e637 TH |
2519 | |
2520 | /* determine basic parameters */ | |
fd1e8a1f | 2521 | pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT; |
d9b55eeb | 2522 | pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT; |
6563297c | 2523 | pcpu_atom_size = ai->atom_size; |
61cf93d3 DZ |
2524 | pcpu_chunk_struct_size = struct_size(chunk, populated, |
2525 | BITS_TO_LONGS(pcpu_unit_pages)); | |
cafe8816 | 2526 | |
30a5b536 DZ |
2527 | pcpu_stats_save_ai(ai); |
2528 | ||
d9b55eeb TH |
2529 | /* |
2530 | * Allocate chunk slots. The additional last slot is for | |
2531 | * empty chunks. | |
2532 | */ | |
2533 | pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2; | |
3c7be18a RG |
2534 | pcpu_chunk_lists = memblock_alloc(pcpu_nr_slots * |
2535 | sizeof(pcpu_chunk_lists[0]) * | |
2536 | PCPU_NR_CHUNK_TYPES, | |
2537 | SMP_CACHE_BYTES); | |
2538 | if (!pcpu_chunk_lists) | |
f655f405 | 2539 | panic("%s: Failed to allocate %zu bytes\n", __func__, |
3c7be18a RG |
2540 | pcpu_nr_slots * sizeof(pcpu_chunk_lists[0]) * |
2541 | PCPU_NR_CHUNK_TYPES); | |
2542 | ||
2543 | for (type = 0; type < PCPU_NR_CHUNK_TYPES; type++) | |
2544 | for (i = 0; i < pcpu_nr_slots; i++) | |
2545 | INIT_LIST_HEAD(&pcpu_chunk_list(type)[i]); | |
fbf59bc9 | 2546 | |
d2f3c384 DZF |
2547 | /* |
2548 | * The end of the static region needs to be aligned with the | |
2549 | * minimum allocation size as this offsets the reserved and | |
2550 | * dynamic region. The first chunk ends page aligned by | |
2551 | * expanding the dynamic region, therefore the dynamic region | |
2552 | * can be shrunk to compensate while still staying above the | |
2553 | * configured sizes. | |
2554 | */ | |
2555 | static_size = ALIGN(ai->static_size, PCPU_MIN_ALLOC_SIZE); | |
2556 | dyn_size = ai->dyn_size - (static_size - ai->static_size); | |
2557 | ||
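	/*
	 * Example (illustrative): with ai->static_size == 45055 and
	 * PCPU_MIN_ALLOC_SIZE == 4, static_size = ALIGN(45055, 4) = 45056
	 * and dyn_size shrinks by one byte to compensate, so the first
	 * chunk still ends page aligned while the dynamic region stays
	 * above the configured size.
	 */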
edcb4639 | 2558 | /* |
c0ebfdc3 DZF |
2559 | * Initialize first chunk. |
2560 | * If the reserved_size is non-zero, this initializes the reserved | |
2561 | * chunk. If the reserved_size is zero, the reserved chunk is NULL | |
2562 | * and the dynamic region is initialized here. The first chunk, | |
2563 | * pcpu_first_chunk, will always point to the chunk that serves | |
2564 | * the dynamic region. | |
edcb4639 | 2565 | */ |
d2f3c384 DZF |
2566 | tmp_addr = (unsigned long)base_addr + static_size; |
2567 | map_size = ai->reserved_size ?: dyn_size; | |
40064aec | 2568 | chunk = pcpu_alloc_first_chunk(tmp_addr, map_size); |
61ace7fa | 2569 | |
edcb4639 | 2570 | /* init dynamic chunk if necessary */ |
b9c39442 | 2571 | if (ai->reserved_size) { |
0c4169c3 | 2572 | pcpu_reserved_chunk = chunk; |
b9c39442 | 2573 | |
d2f3c384 | 2574 | tmp_addr = (unsigned long)base_addr + static_size + |
c0ebfdc3 | 2575 | ai->reserved_size; |
d2f3c384 | 2576 | map_size = dyn_size; |
40064aec | 2577 | chunk = pcpu_alloc_first_chunk(tmp_addr, map_size); |
edcb4639 TH |
2578 | } |
2579 | ||
2441d15c | 2580 | /* link the first chunk in */ |
0c4169c3 | 2581 | pcpu_first_chunk = chunk; |
0cecf50c | 2582 | pcpu_nr_empty_pop_pages = pcpu_first_chunk->nr_empty_pop_pages; |
ae9e6bc9 | 2583 | pcpu_chunk_relocate(pcpu_first_chunk, -1); |
fbf59bc9 | 2584 | |
7e8a6304 DZF |
2585 | /* include all regions of the first chunk */ |
2586 | pcpu_nr_populated += PFN_DOWN(size_sum); | |
2587 | ||
30a5b536 | 2588 | pcpu_stats_chunk_alloc(); |
df95e795 | 2589 | trace_percpu_create_chunk(base_addr); |
30a5b536 | 2590 | |
fbf59bc9 | 2591 | /* we're done */ |
bba174f5 | 2592 | pcpu_base_addr = base_addr; |
fbf59bc9 | 2593 | } |
66c3a757 | 2594 | |
bbddff05 TH |
2595 | #ifdef CONFIG_SMP |
2596 | ||
17f3609c | 2597 | const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = { |
f58dc01b TH |
2598 | [PCPU_FC_AUTO] = "auto", |
2599 | [PCPU_FC_EMBED] = "embed", | |
2600 | [PCPU_FC_PAGE] = "page", | |
f58dc01b | 2601 | }; |
66c3a757 | 2602 | |
f58dc01b | 2603 | enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO; |
66c3a757 | 2604 | |
f58dc01b TH |
2605 | static int __init percpu_alloc_setup(char *str) |
2606 | { | |
5479c78a CG |
2607 | if (!str) |
2608 | return -EINVAL; | |
2609 | ||
f58dc01b TH |
2610 | if (0) |
2611 | /* nada */; | |
2612 | #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK | |
2613 | else if (!strcmp(str, "embed")) | |
2614 | pcpu_chosen_fc = PCPU_FC_EMBED; | |
2615 | #endif | |
2616 | #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK | |
2617 | else if (!strcmp(str, "page")) | |
2618 | pcpu_chosen_fc = PCPU_FC_PAGE; | |
f58dc01b TH |
2619 | #endif |
2620 | else | |
870d4b12 | 2621 | pr_warn("unknown allocator %s specified\n", str); |
66c3a757 | 2622 | |
f58dc01b | 2623 | return 0; |
66c3a757 | 2624 | } |
f58dc01b | 2625 | early_param("percpu_alloc", percpu_alloc_setup); |
66c3a757 | 2626 | |
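/*
 * Example (illustrative): the first chunk allocator can be selected
 * on the kernel command line, e.g.:
 *
 *	percpu_alloc=page
 *
 * "embed" and "page" are honored only when the corresponding
 * CONFIG_NEED_PER_CPU_{EMBED,PAGE}_FIRST_CHUNK option is enabled;
 * any other value triggers the warning above and leaves the default
 * PCPU_FC_AUTO in place.
 */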
3c9a024f TH |
2627 | /* |
2628 | * pcpu_embed_first_chunk() is used by the generic percpu setup. | |
2629 | * Build it if needed by the arch config or if the generic setup is | |
2630 | * going to be used. | |
2631 | */ | |
08fc4580 TH |
2632 | #if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \ |
2633 | !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) | |
3c9a024f TH |
2634 | #define BUILD_EMBED_FIRST_CHUNK |
2635 | #endif | |
2636 | ||
2637 | /* build pcpu_page_first_chunk() iff needed by the arch config */ | |
2638 | #if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK) | |
2639 | #define BUILD_PAGE_FIRST_CHUNK | |
2640 | #endif | |
2641 | ||
2642 | /* pcpu_build_alloc_info() is used by both embed and page first chunk */ | |
2643 | #if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK) | |
2644 | /** | |
2645 | * pcpu_build_alloc_info - build alloc_info considering distances between CPUs | |
2646 | * @reserved_size: the size of reserved percpu area in bytes | |
2647 | * @dyn_size: minimum free size for dynamic allocation in bytes | |
2648 | * @atom_size: allocation atom size | |
2649 | * @cpu_distance_fn: callback to determine distance between cpus, optional | |
2650 | * | |
2651 | * This function determines grouping of units, their mappings to cpus | |
2652 | * and other parameters considering needed percpu size, allocation | |
2653 | * atom size and distances between CPUs. | |
2654 | * | |
bffc4375 | 2655 | * Group sizes are always multiples of atom size, and CPUs which are at | |
3c9a024f TH |
2656 | * LOCAL_DISTANCE both ways are grouped together and share space for |
2657 | * units in the same group. The returned configuration is guaranteed | |
2658 | * to have CPUs on different nodes in different groups and >=75% usage | |
2659 | * of allocated virtual address space. | |
2660 | * | |
2661 | * RETURNS: | |
2662 | * On success, pointer to the new allocation_info is returned. On | |
2663 | * failure, ERR_PTR value is returned. | |
2664 | */ | |
2665 | static struct pcpu_alloc_info * __init pcpu_build_alloc_info( | |
2666 | size_t reserved_size, size_t dyn_size, | |
2667 | size_t atom_size, | |
2668 | pcpu_fc_cpu_distance_fn_t cpu_distance_fn) | |
2669 | { | |
2670 | static int group_map[NR_CPUS] __initdata; | |
2671 | static int group_cnt[NR_CPUS] __initdata; | |
2672 | const size_t static_size = __per_cpu_end - __per_cpu_start; | |
2673 | int nr_groups = 1, nr_units = 0; | |
2674 | size_t size_sum, min_unit_size, alloc_size; | |
3f649ab7 | 2675 | int upa, max_upa, best_upa; /* units_per_alloc */ |
3c9a024f TH |
2676 | int last_allocs, group, unit; |
2677 | unsigned int cpu, tcpu; | |
2678 | struct pcpu_alloc_info *ai; | |
2679 | unsigned int *cpu_map; | |
2680 | ||
2681 | /* this function may be called multiple times */ | |
2682 | memset(group_map, 0, sizeof(group_map)); | |
2683 | memset(group_cnt, 0, sizeof(group_cnt)); | |
2684 | ||
2685 | /* calculate size_sum and ensure dyn_size is enough for early alloc */ | |
2686 | size_sum = PFN_ALIGN(static_size + reserved_size + | |
2687 | max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE)); | |
2688 | dyn_size = size_sum - static_size - reserved_size; | |
2689 | ||
2690 | /* | |
2691 | * Determine min_unit_size, alloc_size and max_upa such that | |
2692 | * alloc_size is multiple of atom_size and is the smallest | |
25985edc | 2693 | * which can accommodate 4k aligned segments which are equal to |
3c9a024f TH |
2694 | * or larger than min_unit_size. |
2695 | */ | |
2696 | min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE); | |
2697 | ||
9c015162 | 2698 | /* determine the maximum # of units that can fit in an allocation */ |
3c9a024f TH |
2699 | alloc_size = roundup(min_unit_size, atom_size); |
2700 | upa = alloc_size / min_unit_size; | |
f09f1243 | 2701 | while (alloc_size % upa || (offset_in_page(alloc_size / upa))) |
3c9a024f TH |
2702 | upa--; |
2703 | max_upa = upa; | |
2704 | ||
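/*
 * Worked example (illustrative figures): with atom_size = 2MB and
 * min_unit_size = 320KB, alloc_size = roundup(320KB, 2MB) = 2MB and
 * upa starts at 2MB / 320KB = 6.  2MB is divisible by neither 6 nor
 * 5, so upa decrements to 4, where 2MB / 4 = 512KB is page aligned.
 * Hence max_upa = 4.
 */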
2705 | /* group cpus according to their proximity */ | |
2706 | for_each_possible_cpu(cpu) { | |
2707 | group = 0; | |
2708 | next_group: | |
2709 | for_each_possible_cpu(tcpu) { | |
2710 | if (cpu == tcpu) | |
2711 | break; | |
2712 | if (group_map[tcpu] == group && cpu_distance_fn && | |
2713 | (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE || | |
2714 | cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) { | |
2715 | group++; | |
2716 | nr_groups = max(nr_groups, group + 1); | |
2717 | goto next_group; | |
2718 | } | |
2719 | } | |
2720 | group_map[cpu] = group; | |
2721 | group_cnt[group]++; | |
2722 | } | |
2723 | ||
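/*
 * Continuing the illustration: on a hypothetical two-node machine
 * with four possible CPUs (cpu0/cpu1 on node A, cpu2/cpu3 on node B)
 * whose inter-node distance exceeds LOCAL_DISTANCE, the loop above
 * yields group_map = { 0, 0, 1, 1 }, group_cnt = { 2, 2 } and
 * nr_groups = 2.
 */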
2724 | /* | |
9c015162 DZF |
2725 | * Wasted space is caused by a ratio imbalance of upa to group_cnt. |
2726 | * Expand the unit_size until we use >= 75% of the units allocated. | |
2727 | * This is related to atom_size, which could be much larger than the unit_size. | |
3c9a024f TH |
2728 | */ |
2729 | last_allocs = INT_MAX; | |
2730 | for (upa = max_upa; upa; upa--) { | |
2731 | int allocs = 0, wasted = 0; | |
2732 | ||
f09f1243 | 2733 | if (alloc_size % upa || (offset_in_page(alloc_size / upa))) |
3c9a024f TH |
2734 | continue; |
2735 | ||
2736 | for (group = 0; group < nr_groups; group++) { | |
2737 | int this_allocs = DIV_ROUND_UP(group_cnt[group], upa); | |
2738 | allocs += this_allocs; | |
2739 | wasted += this_allocs * upa - group_cnt[group]; | |
2740 | } | |
2741 | ||
2742 | /* | |
2743 | * Don't accept if wastage is over 1/3. The | |
2744 | * greater-than comparison ensures upa==1 always | |
2745 | * passes the following check. | |
2746 | */ | |
2747 | if (wasted > num_possible_cpus() / 3) | |
2748 | continue; | |
2749 | ||
2750 | /* and then don't consume more memory */ | |
2751 | if (allocs > last_allocs) | |
2752 | break; | |
2753 | last_allocs = allocs; | |
2754 | best_upa = upa; | |
2755 | } | |
2756 | upa = best_upa; | |
2757 | ||
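/*
 * With the illustrative figures above (max_upa = 4, two groups of two
 * CPUs): upa = 4 wastes two units per group and 4 > 4 / 3 fails the
 * wastage check; upa = 3 does not divide the 2MB alloc_size; upa = 2
 * wastes nothing at one allocation per group, so best_upa = 2 and the
 * unit size ends up as 2MB / 2 = 1MB.
 */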
2758 | /* allocate and fill alloc_info */ | |
2759 | for (group = 0; group < nr_groups; group++) | |
2760 | nr_units += roundup(group_cnt[group], upa); | |
2761 | ||
2762 | ai = pcpu_alloc_alloc_info(nr_groups, nr_units); | |
2763 | if (!ai) | |
2764 | return ERR_PTR(-ENOMEM); | |
2765 | cpu_map = ai->groups[0].cpu_map; | |
2766 | ||
2767 | for (group = 0; group < nr_groups; group++) { | |
2768 | ai->groups[group].cpu_map = cpu_map; | |
2769 | cpu_map += roundup(group_cnt[group], upa); | |
2770 | } | |
2771 | ||
2772 | ai->static_size = static_size; | |
2773 | ai->reserved_size = reserved_size; | |
2774 | ai->dyn_size = dyn_size; | |
2775 | ai->unit_size = alloc_size / upa; | |
2776 | ai->atom_size = atom_size; | |
2777 | ai->alloc_size = alloc_size; | |
2778 | ||
2de7852f | 2779 | for (group = 0, unit = 0; group < nr_groups; group++) { |
3c9a024f TH |
2780 | struct pcpu_group_info *gi = &ai->groups[group]; |
2781 | ||
2782 | /* | |
2783 | * Initialize base_offset as if all groups are located | |
2784 | * back-to-back. The caller should update this to | |
2785 | * reflect actual allocation. | |
2786 | */ | |
2787 | gi->base_offset = unit * ai->unit_size; | |
2788 | ||
2789 | for_each_possible_cpu(cpu) | |
2790 | if (group_map[cpu] == group) | |
2791 | gi->cpu_map[gi->nr_units++] = cpu; | |
2792 | gi->nr_units = roundup(gi->nr_units, upa); | |
2793 | unit += gi->nr_units; | |
2794 | } | |
2795 | BUG_ON(unit != nr_units); | |
2796 | ||
2797 | return ai; | |
2798 | } | |
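/*
 * For the running illustration, the returned alloc_info has
 * nr_groups = 2, unit_size = 1MB, two units per group (cpu_map
 * { 0, 1 } and { 2, 3 }) and back-to-back base_offsets of 0 and 2MB,
 * which the caller later fixes up to match the actual allocations.
 */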
2799 | #endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */ | |
2800 | ||
2801 | #if defined(BUILD_EMBED_FIRST_CHUNK) | |
66c3a757 TH |
2802 | /** |
2803 | * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem | |
66c3a757 | 2804 | * @reserved_size: the size of reserved percpu area in bytes |
4ba6ce25 | 2805 | * @dyn_size: minimum free size for dynamic allocation in bytes |
c8826dd5 TH |
2806 | * @atom_size: allocation atom size |
2807 | * @cpu_distance_fn: callback to determine distance between cpus, optional | |
2808 | * @alloc_fn: function to allocate percpu page | |
25985edc | 2809 | * @free_fn: function to free percpu page |
66c3a757 TH |
2810 | * |
2811 | * This is a helper to ease setting up the embedded first percpu chunk | |
2812 | * and can be called where pcpu_setup_first_chunk() is expected. | |
2813 | * | |
2814 | * If this function is used to set up the first chunk, it is allocated | |
c8826dd5 TH |
2815 | * by calling @alloc_fn and used as-is without being mapped into |
2816 | * vmalloc area. Allocations are always whole multiples of @atom_size | |
2817 | * aligned to @atom_size. | |
2818 | * | |
2819 | * This enables the first chunk to piggyback on the linear physical | |
2820 | * mapping, which often uses a larger page size. Please note that this | |
2821 | * can result in very sparse cpu->unit mapping on NUMA machines thus | |
2822 | * requiring large vmalloc address space. Don't use this allocator if | |
2823 | * vmalloc space is not orders of magnitude larger than distances | |
2824 | * between node memory addresses (i.e., 32-bit NUMA machines). | |
66c3a757 | 2825 | * |
4ba6ce25 | 2826 | * @dyn_size specifies the minimum dynamic area size. |
66c3a757 TH |
2827 | * |
2828 | * If the needed size is smaller than the minimum or specified unit | |
c8826dd5 | 2829 | * size, the leftover is freed using @free_fn. | |
66c3a757 TH |
2830 | * |
2831 | * RETURNS: | |
fb435d52 | 2832 | * 0 on success, -errno on failure. |
66c3a757 | 2833 | */ |
4ba6ce25 | 2834 | int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size, |
c8826dd5 TH |
2835 | size_t atom_size, |
2836 | pcpu_fc_cpu_distance_fn_t cpu_distance_fn, | |
2837 | pcpu_fc_alloc_fn_t alloc_fn, | |
2838 | pcpu_fc_free_fn_t free_fn) | |
66c3a757 | 2839 | { |
c8826dd5 TH |
2840 | void *base = (void *)ULONG_MAX; |
2841 | void **areas = NULL; | |
fd1e8a1f | 2842 | struct pcpu_alloc_info *ai; |
93c76b6b | 2843 | size_t size_sum, areas_size; |
2844 | unsigned long max_distance; | |
163fa234 | 2845 | int group, i, highest_group, rc = 0; |
66c3a757 | 2846 | |
c8826dd5 TH |
2847 | ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size, |
2848 | cpu_distance_fn); | |
fd1e8a1f TH |
2849 | if (IS_ERR(ai)) |
2850 | return PTR_ERR(ai); | |
66c3a757 | 2851 | |
fd1e8a1f | 2852 | size_sum = ai->static_size + ai->reserved_size + ai->dyn_size; |
c8826dd5 | 2853 | areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *)); |
fa8a7094 | 2854 | |
26fb3dae | 2855 | areas = memblock_alloc(areas_size, SMP_CACHE_BYTES); |
c8826dd5 | 2856 | if (!areas) { |
fb435d52 | 2857 | rc = -ENOMEM; |
c8826dd5 | 2858 | goto out_free; |
fa8a7094 | 2859 | } |
66c3a757 | 2860 | |
9b739662 | 2861 | /* allocate, copy and determine base address & max_distance */ |
2862 | highest_group = 0; | |
c8826dd5 TH |
2863 | for (group = 0; group < ai->nr_groups; group++) { |
2864 | struct pcpu_group_info *gi = &ai->groups[group]; | |
2865 | unsigned int cpu = NR_CPUS; | |
2866 | void *ptr; | |
2867 | ||
2868 | for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++) | |
2869 | cpu = gi->cpu_map[i]; | |
2870 | BUG_ON(cpu == NR_CPUS); | |
2871 | ||
2872 | /* allocate space for the whole group */ | |
2873 | ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size); | |
2874 | if (!ptr) { | |
2875 | rc = -ENOMEM; | |
2876 | goto out_free_areas; | |
2877 | } | |
f528f0b8 CM |
2878 | /* kmemleak tracks the percpu allocations separately */ |
2879 | kmemleak_free(ptr); | |
c8826dd5 | 2880 | areas[group] = ptr; |
fd1e8a1f | 2881 | |
c8826dd5 | 2882 | base = min(ptr, base); |
9b739662 | 2883 | if (ptr > areas[highest_group]) |
2884 | highest_group = group; | |
2885 | } | |
2886 | max_distance = areas[highest_group] - base; | |
2887 | max_distance += ai->unit_size * ai->groups[highest_group].nr_units; | |
2888 | ||
2889 | /* warn if maximum distance is further than 75% of vmalloc space */ | |
2890 | if (max_distance > VMALLOC_TOTAL * 3 / 4) { | |
2891 | pr_warn("max_distance=0x%lx too large for vmalloc space 0x%lx\n", | |
2892 | max_distance, VMALLOC_TOTAL); | |
2893 | #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK | |
2894 | /* and fail if we have fallback */ | |
2895 | rc = -EINVAL; | |
2896 | goto out_free_areas; | |
2897 | #endif | |
42b64281 TH |
2898 | } |
2899 | ||
2900 | /* | |
2901 | * Copy data and free unused parts. This should happen after all | |
2902 | * allocations are complete; otherwise, we may end up with | |
2903 | * overlapping groups. | |
2904 | */ | |
2905 | for (group = 0; group < ai->nr_groups; group++) { | |
2906 | struct pcpu_group_info *gi = &ai->groups[group]; | |
2907 | void *ptr = areas[group]; | |
c8826dd5 TH |
2908 | |
2909 | for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) { | |
2910 | if (gi->cpu_map[i] == NR_CPUS) { | |
2911 | /* unused unit, free whole */ | |
2912 | free_fn(ptr, ai->unit_size); | |
2913 | continue; | |
2914 | } | |
2915 | /* copy static data and free the unused part */ | |
2916 | memcpy(ptr, __per_cpu_load, ai->static_size); | |
2917 | free_fn(ptr + size_sum, ai->unit_size - size_sum); | |
2918 | } | |
fa8a7094 | 2919 | } |
66c3a757 | 2920 | |
c8826dd5 | 2921 | /* base address is now known, determine group base offsets */ |
6ea529a2 | 2922 | for (group = 0; group < ai->nr_groups; group++) { |
c8826dd5 | 2923 | ai->groups[group].base_offset = areas[group] - base; |
6ea529a2 | 2924 | } |
c8826dd5 | 2925 | |
00206a69 MC |
2926 | pr_info("Embedded %zu pages/cpu s%zu r%zu d%zu u%zu\n", |
2927 | PFN_DOWN(size_sum), ai->static_size, ai->reserved_size, | |
fd1e8a1f | 2928 | ai->dyn_size, ai->unit_size); |
d4b95f80 | 2929 | |
163fa234 | 2930 | pcpu_setup_first_chunk(ai, base); |
c8826dd5 TH |
2931 | goto out_free; |
2932 | ||
2933 | out_free_areas: | |
2934 | for (group = 0; group < ai->nr_groups; group++) | |
f851c8d8 MH |
2935 | if (areas[group]) |
2936 | free_fn(areas[group], | |
2937 | ai->groups[group].nr_units * ai->unit_size); | |
c8826dd5 | 2938 | out_free: |
fd1e8a1f | 2939 | pcpu_free_alloc_info(ai); |
c8826dd5 | 2940 | if (areas) |
999c17e3 | 2941 | memblock_free_early(__pa(areas), areas_size); |
fb435d52 | 2942 | return rc; |
d4b95f80 | 2943 | } |
3c9a024f | 2944 | #endif /* BUILD_EMBED_FIRST_CHUNK */ |
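/*
 * Illustrative sketch (example only, never compiled): a NUMA-aware
 * arch could wire up the embed helper roughly like this.  The my_*()
 * names and the 2MB atom size are assumptions made for the example,
 * not requirements of the interface.
 */
#if 0
static int __init my_cpu_distance(unsigned int from, unsigned int to)
{
	/* anything above LOCAL_DISTANCE splits the cpus into groups */
	return cpu_to_node(from) == cpu_to_node(to) ? LOCAL_DISTANCE
						    : REMOTE_DISTANCE;
}

static void * __init my_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
	/* a real arch would allocate near @cpu's node here */
	return memblock_alloc_from(size, align, __pa(MAX_DMA_ADDRESS));
}

static void __init my_fc_free(void *ptr, size_t size)
{
	memblock_free_early(__pa(ptr), size);
}

void __init setup_per_cpu_areas(void)
{
	if (pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
				   PERCPU_DYNAMIC_RESERVE, 2UL << 20,
				   my_cpu_distance, my_fc_alloc,
				   my_fc_free) < 0)
		panic("cannot initialize percpu area");
	/*
	 * Setting up __per_cpu_offset[] is omitted here; see the
	 * generic version further below.
	 */
}
#endif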
d4b95f80 | 2945 | |
3c9a024f | 2946 | #ifdef BUILD_PAGE_FIRST_CHUNK |
d4b95f80 | 2947 | /** |
00ae4064 | 2948 | * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages |
d4b95f80 TH |
2949 | * @reserved_size: the size of reserved percpu area in bytes |
2950 | * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE | |
25985edc | 2951 | * @free_fn: function to free percpu page, always called with PAGE_SIZE |
d4b95f80 TH |
2952 | * @populate_pte_fn: function to populate pte |
2953 | * | |
00ae4064 TH |
2954 | * This is a helper to ease setting up a page-remapped first percpu | |
2955 | * chunk and can be called where pcpu_setup_first_chunk() is expected. | |
d4b95f80 TH |
2956 | * |
2957 | * This is the basic allocator. The static percpu area is allocated | |
2958 | * page-by-page into the vmalloc area. | |
2959 | * | |
2960 | * RETURNS: | |
fb435d52 | 2961 | * 0 on success, -errno on failure. |
d4b95f80 | 2962 | */ |
fb435d52 TH |
2963 | int __init pcpu_page_first_chunk(size_t reserved_size, |
2964 | pcpu_fc_alloc_fn_t alloc_fn, | |
2965 | pcpu_fc_free_fn_t free_fn, | |
2966 | pcpu_fc_populate_pte_fn_t populate_pte_fn) | |
d4b95f80 | 2967 | { |
8f05a6a6 | 2968 | static struct vm_struct vm; |
fd1e8a1f | 2969 | struct pcpu_alloc_info *ai; |
00ae4064 | 2970 | char psize_str[16]; |
ce3141a2 | 2971 | int unit_pages; |
d4b95f80 | 2972 | size_t pages_size; |
ce3141a2 | 2973 | struct page **pages; |
163fa234 | 2974 | int unit, i, j, rc = 0; |
8f606604 | 2975 | int upa; |
2976 | int nr_g0_units; | |
d4b95f80 | 2977 | |
00ae4064 TH |
2978 | snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10); |
2979 | ||
4ba6ce25 | 2980 | ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL); |
fd1e8a1f TH |
2981 | if (IS_ERR(ai)) |
2982 | return PTR_ERR(ai); | |
2983 | BUG_ON(ai->nr_groups != 1); | |
8f606604 | 2984 | upa = ai->alloc_size / ai->unit_size; | |
2985 | nr_g0_units = roundup(num_possible_cpus(), upa); | |
0b59c25f | 2986 | if (WARN_ON(ai->groups[0].nr_units != nr_g0_units)) { |
8f606604 | 2987 | pcpu_free_alloc_info(ai); |
2988 | return -EINVAL; | |
2989 | } | |
fd1e8a1f TH |
2990 | |
2991 | unit_pages = ai->unit_size >> PAGE_SHIFT; | |
d4b95f80 TH |
2992 | |
2993 | /* unaligned allocations can't be freed, round up to page size */ | |
fd1e8a1f TH |
2994 | pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() * |
2995 | sizeof(pages[0])); | |
7e1c4e27 | 2996 | pages = memblock_alloc(pages_size, SMP_CACHE_BYTES); |
f655f405 MR |
2997 | if (!pages) |
2998 | panic("%s: Failed to allocate %zu bytes\n", __func__, | |
2999 | pages_size); | |
d4b95f80 | 3000 | |
8f05a6a6 | 3001 | /* allocate pages */ |
d4b95f80 | 3002 | j = 0; |
8f606604 | 3003 | for (unit = 0; unit < num_possible_cpus(); unit++) { |
3004 | unsigned int cpu = ai->groups[0].cpu_map[unit]; | |
ce3141a2 | 3005 | for (i = 0; i < unit_pages; i++) { |
d4b95f80 TH |
3006 | void *ptr; |
3007 | ||
3cbc8565 | 3008 | ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE); |
d4b95f80 | 3009 | if (!ptr) { |
870d4b12 | 3010 | pr_warn("failed to allocate %s page for cpu%u\n", |
8f606604 | 3011 | psize_str, cpu); |
d4b95f80 TH |
3012 | goto enomem; |
3013 | } | |
f528f0b8 CM |
3014 | /* kmemleak tracks the percpu allocations separately */ |
3015 | kmemleak_free(ptr); | |
ce3141a2 | 3016 | pages[j++] = virt_to_page(ptr); |
d4b95f80 | 3017 | } |
8f606604 | 3018 | } |
d4b95f80 | 3019 | |
8f05a6a6 TH |
3020 | /* allocate vm area, map the pages and copy static data */ |
3021 | vm.flags = VM_ALLOC; | |
fd1e8a1f | 3022 | vm.size = num_possible_cpus() * ai->unit_size; |
8f05a6a6 TH |
3023 | vm_area_register_early(&vm, PAGE_SIZE); |
3024 | ||
fd1e8a1f | 3025 | for (unit = 0; unit < num_possible_cpus(); unit++) { |
1d9d3257 | 3026 | unsigned long unit_addr = |
fd1e8a1f | 3027 | (unsigned long)vm.addr + unit * ai->unit_size; |
8f05a6a6 | 3028 | |
ce3141a2 | 3029 | for (i = 0; i < unit_pages; i++) |
8f05a6a6 TH |
3030 | populate_pte_fn(unit_addr + (i << PAGE_SHIFT)); |
3031 | ||
3032 | /* pte already populated, the following shouldn't fail */ | |
fb435d52 TH |
3033 | rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages], |
3034 | unit_pages); | |
3035 | if (rc < 0) | |
3036 | panic("failed to map percpu area, err=%d\n", rc); | |
66c3a757 | 3037 | |
8f05a6a6 TH |
3038 | /* |
3039 | * FIXME: Archs with virtual cache should flush local | |
3040 | * cache for the linear mapping here - something | |
3041 | * equivalent to flush_cache_vmap() on the local cpu. | |
3042 | * flush_cache_vmap() can't be used as most supporting | |
3043 | * data structures are not set up yet. | |
3044 | */ | |
3045 | ||
3046 | /* copy static data */ | |
fd1e8a1f | 3047 | memcpy((void *)unit_addr, __per_cpu_load, ai->static_size); |
66c3a757 TH |
3048 | } |
3049 | ||
3050 | /* we're ready, commit */ | |
00206a69 MC |
3051 | pr_info("%d %s pages/cpu s%zu r%zu d%zu\n", |
3052 | unit_pages, psize_str, ai->static_size, | |
fd1e8a1f | 3053 | ai->reserved_size, ai->dyn_size); |
d4b95f80 | 3054 | |
163fa234 | 3055 | pcpu_setup_first_chunk(ai, vm.addr); |
d4b95f80 TH |
3056 | goto out_free_ar; |
3057 | ||
3058 | enomem: | |
3059 | while (--j >= 0) | |
ce3141a2 | 3060 | free_fn(page_address(pages[j]), PAGE_SIZE); |
fb435d52 | 3061 | rc = -ENOMEM; |
d4b95f80 | 3062 | out_free_ar: |
999c17e3 | 3063 | memblock_free_early(__pa(pages), pages_size); |
fd1e8a1f | 3064 | pcpu_free_alloc_info(ai); |
fb435d52 | 3065 | return rc; |
d4b95f80 | 3066 | } |
3c9a024f | 3067 | #endif /* BUILD_PAGE_FIRST_CHUNK */ |
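/*
 * Illustrative sketch (example only, never compiled): the page-based
 * helper needs three callbacks.  my_populate_pte() stands in for a
 * hypothetical arch-provided function that pre-allocates the page
 * table levels for one page at the given vmalloc-space address so
 * that the later __pcpu_map_pages() call cannot fail.
 */
#if 0
static void * __init my_page_alloc(unsigned int cpu, size_t size, size_t align)
{
	/* always invoked with size == align == PAGE_SIZE */
	return memblock_alloc_from(size, align, __pa(MAX_DMA_ADDRESS));
}

static void __init my_page_free(void *ptr, size_t size)
{
	memblock_free_early(__pa(ptr), size);
}

void __init setup_per_cpu_areas(void)
{
	if (pcpu_page_first_chunk(PERCPU_MODULE_RESERVE, my_page_alloc,
				  my_page_free, my_populate_pte) < 0)
		panic("cannot initialize percpu area");
}
#endif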
d4b95f80 | 3068 | |
bbddff05 | 3069 | #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA |
e74e3962 | 3070 | /* |
bbddff05 | 3071 | * Generic SMP percpu area setup. |
e74e3962 TH |
3072 | * |
3073 | * The embedding helper is used because its behavior closely resembles | |
3074 | * the original non-dynamic generic percpu area setup. This is | |
3075 | * important because many archs have addressing restrictions and might | |
3076 | * fail if the percpu area is located far away from the previous | |
3077 | * location. As an added bonus, in non-NUMA cases, embedding is | |
3078 | * generally a good idea TLB-wise because the percpu area can | |
3079 | * piggyback on the physical linear memory mapping, which uses large | |
3080 | * page mappings on applicable archs. | |
3081 | */ | |
e74e3962 TH |
3082 | unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; |
3083 | EXPORT_SYMBOL(__per_cpu_offset); | |
3084 | ||
c8826dd5 TH |
3085 | static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size, |
3086 | size_t align) | |
3087 | { | |
26fb3dae | 3088 | return memblock_alloc_from(size, align, __pa(MAX_DMA_ADDRESS)); |
c8826dd5 | 3089 | } |
66c3a757 | 3090 | |
c8826dd5 TH |
3091 | static void __init pcpu_dfl_fc_free(void *ptr, size_t size) |
3092 | { | |
999c17e3 | 3093 | memblock_free_early(__pa(ptr), size); |
c8826dd5 TH |
3094 | } |
3095 | ||
e74e3962 TH |
3096 | void __init setup_per_cpu_areas(void) |
3097 | { | |
e74e3962 TH |
3098 | unsigned long delta; |
3099 | unsigned int cpu; | |
fb435d52 | 3100 | int rc; |
e74e3962 TH |
3101 | |
3102 | /* | |
3103 | * Always reserve area for module percpu variables. That's | |
3104 | * what the legacy allocator did. | |
3105 | */ | |
fb435d52 | 3106 | rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE, |
c8826dd5 TH |
3107 | PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL, |
3108 | pcpu_dfl_fc_alloc, pcpu_dfl_fc_free); | |
fb435d52 | 3109 | if (rc < 0) |
bbddff05 | 3110 | panic("Failed to initialize percpu areas."); |
e74e3962 TH |
3111 | |
3112 | delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; | |
3113 | for_each_possible_cpu(cpu) | |
fb435d52 | 3114 | __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu]; |
66c3a757 | 3115 | } |
bbddff05 TH |
3116 | #endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */ |
3117 | ||
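/*
 * Illustration (not part of the source): once __per_cpu_offset[] is
 * filled in, each CPU's copy of a static percpu variable lives at its
 * link-time address plus that CPU's offset, which is what
 * per_cpu_ptr() computes.  With a hypothetical variable:
 *
 *	DEFINE_PER_CPU(int, demo_counter);
 *	int *p = per_cpu_ptr(&demo_counter, cpu);
 *
 * p equals (void *)&demo_counter + __per_cpu_offset[cpu].
 */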
3118 | #else /* CONFIG_SMP */ | |
3119 | ||
3120 | /* | |
3121 | * UP percpu area setup. | |
3122 | * | |
3123 | * UP always uses the km-based percpu allocator with identity mapping. | |
3124 | * Static percpu variables are indistinguishable from the usual static | |
3125 | * variables and don't require any special preparation. | |
3126 | */ | |
3127 | void __init setup_per_cpu_areas(void) | |
3128 | { | |
3129 | const size_t unit_size = | |
3130 | roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE, | |
3131 | PERCPU_DYNAMIC_RESERVE)); | |
3132 | struct pcpu_alloc_info *ai; | |
3133 | void *fc; | |
3134 | ||
3135 | ai = pcpu_alloc_alloc_info(1, 1); | |
26fb3dae | 3136 | fc = memblock_alloc_from(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)); |
bbddff05 TH |
3137 | if (!ai || !fc) |
3138 | panic("Failed to allocate memory for percpu areas."); | |
100d13c3 CM |
3139 | /* kmemleak tracks the percpu allocations separately */ |
3140 | kmemleak_free(fc); | |
bbddff05 TH |
3141 | |
3142 | ai->dyn_size = unit_size; | |
3143 | ai->unit_size = unit_size; | |
3144 | ai->atom_size = unit_size; | |
3145 | ai->alloc_size = unit_size; | |
3146 | ai->groups[0].nr_units = 1; | |
3147 | ai->groups[0].cpu_map[0] = 0; | |
3148 | ||
163fa234 | 3149 | pcpu_setup_first_chunk(ai, fc); |
438a5061 | 3150 | pcpu_free_alloc_info(ai); |
bbddff05 TH |
3151 | } |
3152 | ||
3153 | #endif /* CONFIG_SMP */ | |
099a19d9 | 3154 | |
7e8a6304 DZF |
3155 | /* |
3156 | * pcpu_nr_pages - calculate total number of populated backing pages | |
3157 | * | |
3158 | * This reflects the number of pages populated to back chunks. Metadata is | |
3159 | * excluded from the number exposed in meminfo as the number of backing pages | |
3160 | * scales with the number of cpus and can quickly outweigh the memory used for | |
3161 | * metadata. It also keeps this calculation nice and simple. | |
3162 | * | |
3163 | * RETURNS: | |
3164 | * Total number of populated backing pages in use by the allocator. | |
3165 | */ | |
3166 | unsigned long pcpu_nr_pages(void) | |
3167 | { | |
3168 | return pcpu_nr_populated * pcpu_nr_units; | |
3169 | } | |
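/*
 * E.g. (illustrative): with pcpu_nr_populated == 100 pages summed
 * across all chunks and pcpu_nr_units == 8, the allocator reports
 * 800 backing pages to meminfo.
 */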
3170 | ||
1a4d7607 TH |
3171 | /* |
3172 | * The percpu allocator is initialized early during boot, when neither | |
3173 | * slab nor workqueue is available. Plug async management until | |
3174 | * everything is up and running. | |
3175 | */ | |
3176 | static int __init percpu_enable_async(void) | |
3177 | { | |
3178 | pcpu_async_enabled = true; | |
3179 | return 0; | |
3180 | } | |
3181 | subsys_initcall(percpu_enable_async); |