/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Copyright (c) 2015 Google, Inc
 */

#ifndef __ALIGNMEM_H
#define __ALIGNMEM_H

/*
 * ARCH_DMA_MINALIGN is defined in asm/cache.h for each architecture. It
 * is used to align DMA buffers.
 */
#ifndef __ASSEMBLY__
#include <asm/cache.h>
#include <malloc.h>

/*
 * The ALLOC_CACHE_ALIGN_BUFFER macro is used to allocate a buffer on the
 * stack that meets the minimum architecture alignment requirements for DMA.
 * Such a buffer is useful for DMA operations where flushing and invalidating
 * the cache before and after a read and/or write operation is required for
 * correct operation.
 *
 * When called, the macro creates an array on the stack that is sized such
 * that:
 *
 * 1) The beginning of the array can be advanced enough to be aligned.
 *
 * 2) The size of the aligned portion of the array is a multiple of the minimum
 *    architecture alignment required for DMA.
 *
 * 3) The aligned portion contains enough space for the original number of
 *    elements requested.
 *
 * The macro then creates a pointer to the aligned portion of this array and
 * assigns to the pointer the address of the first element in the aligned
 * portion of the array.
 *
 * Calling the macro as:
 *
 *	ALLOC_CACHE_ALIGN_BUFFER(uint32_t, buffer, 1024);
 *
 * will result in something similar to:
 *
 *	uint32_t buffer[1024];
 *
 * The following differences exist:
 *
 * 1) The resulting buffer is guaranteed to be aligned to the value of
 *    ARCH_DMA_MINALIGN.
 *
 * 2) The buffer variable created by the macro is a pointer to the specified
 *    type, and NOT an array of the specified type. This can be very important
 *    if you want the address of the buffer, which you probably do, to pass it
 *    to the DMA hardware. The value of &buffer is different in the two cases.
 *    In the macro case it will be the address of the pointer, not the address
 *    of the space reserved for the buffer. In the plain array case, however,
 *    it would be the address of the buffer itself. So if you are replacing
 *    hard-coded stack buffers with this macro, make sure you remove the & from
 *    the locations where you take the address of the buffer.
 *
 * Note that the size parameter is the number of array elements to allocate,
 * not the number of bytes.
 *
 * This macro can not be used outside of function scope, or for the creation
 * of a function-scoped static buffer. It can not be used to create a cache
 * line aligned global buffer.
 */
#define PAD_COUNT(s, pad) (((s) - 1) / (pad) + 1)
#define PAD_SIZE(s, pad) (PAD_COUNT(s, pad) * pad)
#define ALLOC_ALIGN_BUFFER_PAD(type, name, size, align, pad)		\
	char __##name[ROUND(PAD_SIZE((size) * sizeof(type), pad), align) \
		      + (align - 1)];					\
									\
	type *name = (type *)ALIGN((uintptr_t)__##name, align)
#define ALLOC_ALIGN_BUFFER(type, name, size, align)			\
	ALLOC_ALIGN_BUFFER_PAD(type, name, size, align, 1)
#define ALLOC_CACHE_ALIGN_BUFFER_PAD(type, name, size, pad)		\
	ALLOC_ALIGN_BUFFER_PAD(type, name, size, ARCH_DMA_MINALIGN, pad)
#define ALLOC_CACHE_ALIGN_BUFFER(type, name, size)			\
	ALLOC_ALIGN_BUFFER(type, name, size, ARCH_DMA_MINALIGN)
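/*
 * Example (illustrative sketch only; the driver structure, function names and
 * block size below are hypothetical, not part of this header): typical use of
 * ALLOC_CACHE_ALIGN_BUFFER inside a driver function. Note that "buf" is
 * already a pointer, so it is passed to the hardware routine directly,
 * without a leading &.
 *
 *	static int example_read_blocks(struct example_dev *dev, uint blkcnt)
 *	{
 *		ALLOC_CACHE_ALIGN_BUFFER(uint8_t, buf, blkcnt * 512);
 *
 *		// buf points at the ARCH_DMA_MINALIGN-aligned portion
 *		return example_dma_read(dev, buf, blkcnt);
 *	}
 */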

/*
 * DEFINE_CACHE_ALIGN_BUFFER() is similar to ALLOC_CACHE_ALIGN_BUFFER, but its
 * purpose is to allow allocating aligned buffers outside of function scope.
 * Usage of this macro shall be avoided or used with extreme care!
 */
#define DEFINE_ALIGN_BUFFER(type, name, size, align)			\
	static char __##name[ALIGN(size * sizeof(type), align)]	\
			__aligned(align);				\
									\
	static type *name = (type *)__##name
#define DEFINE_CACHE_ALIGN_BUFFER(type, name, size)			\
	DEFINE_ALIGN_BUFFER(type, name, size, ARCH_DMA_MINALIGN)
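
/*
 * Example (illustrative sketch only; the buffer name and size below are
 * hypothetical): defining a cache-aligned buffer at file scope, e.g. in a
 * driver source file.
 *
 *	DEFINE_CACHE_ALIGN_BUFFER(uint8_t, sector_buf, 512);
 *
 * This creates a file-scope pointer "sector_buf" to a static buffer of
 * 512 bytes whose start is aligned to ARCH_DMA_MINALIGN.
 */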

/**
 * malloc_cache_aligned() - allocate a memory region aligned to cache line size
 *
 * This allocates memory at a cache-line boundary. The amount allocated may
 * be larger than requested as it is rounded up to the nearest multiple of the
 * cache-line size. This ensures that subsequent cache operations on this
 * memory (flush, invalidate) will not affect subsequently allocated regions.
 *
 * @size: Minimum number of bytes to allocate
 *
 * @return pointer to new memory region, or NULL if there is no more memory
 * available.
 */
static inline void *malloc_cache_aligned(size_t size)
{
	return memalign(ARCH_DMA_MINALIGN, ALIGN(size, ARCH_DMA_MINALIGN));
}
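
/*
 * Example (illustrative sketch only; the variable names and the surrounding
 * error handling are hypothetical): allocating and releasing a cache-aligned
 * DMA buffer. The returned region can be freed with free() as usual.
 *
 *	void *dma_buf = malloc_cache_aligned(len);
 *
 *	if (!dma_buf)
 *		return -ENOMEM;
 *	// ... set up DMA, flush/invalidate the cache as required ...
 *	free(dma_buf);
 */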
#endif /* __ASSEMBLY__ */

#endif /* __ALIGNMEM_H */