// SPDX-License-Identifier: GPL-2.0
/*
 * A fast, small, non-recursive O(n log n) sort for the Linux kernel
 *
 * This performs n*log2(n) + 0.37*n + o(n) comparisons on average,
 * and 1.5*n*log2(n) + O(n) in the (very contrived) worst case.
 *
 * Glibc qsort() manages n*log2(n) - 1.26*n for random inputs (1.63*n
 * better) at the expense of stack usage and much larger code to avoid
 * quicksort's O(n^2) worst case.
 */

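/*
 * For a rough sense of scale (illustrative arithmetic only, not a
 * benchmark): at n = 2^20, log2(n) = 20, so the average case above is
 * about 20.37*n ~= 21.4M compares, versus roughly 18.74*n ~= 19.7M for
 * glibc qsort() -- the 1.63*n gap is about 1.7M compares, or ~8%.
 */
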
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/export.h>
#include <linux/sort.h>

/**
 * is_aligned - is this pointer & size okay for word-wide copying?
 * @base: pointer to data
 * @size: size of each element
 * @align: required alignment (typically 4 or 8)
 *
 * Returns true if elements can be copied using word loads and stores.
 * The size must be a multiple of the alignment, and the base address must
 * be if we do not have CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS.
 *
 * For some reason, gcc doesn't know to optimize "if (a & mask || b & mask)"
 * to "if ((a | b) & mask)", so we do that by hand.
 */
__attribute_const__ __always_inline
static bool is_aligned(const void *base, size_t size, unsigned char align)
{
	unsigned char lsbits = (unsigned char)size;

	(void)base;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	lsbits |= (unsigned char)(uintptr_t)base;
#endif
	return (lsbits & (align - 1)) == 0;
}

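/*
 * For illustration (assuming CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is not
 * set, so the base address is checked as well as the size):
 * is_aligned(p, 16, 8) is true only when p is 8-byte aligned, while
 * is_aligned(p, 12, 8) is always false because 12 is not a multiple of 8;
 * is_aligned(p, 12, 4) is true whenever p is 4-byte aligned.  Only the low
 * byte of @size is inspected, which is all that a 4- or 8-byte mask needs.
 */
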
/**
 * swap_words_32 - swap two elements in 32-bit chunks
 * @a: pointer to the first element to swap
 * @b: pointer to the second element to swap
 * @n: element size (must be a multiple of 4)
 *
 * Exchange the two objects in memory.  This exploits base+index addressing,
 * which basically all CPUs have, to minimize loop overhead computations.
 *
 * For some reason, on x86 gcc 7.3.0 adds a redundant test of n at the
 * bottom of the loop, even though the zero flag is still valid from the
 * subtract (since the intervening mov instructions don't alter the flags).
 * Gcc 8.1.0 doesn't have that problem.
 */
static void swap_words_32(void *a, void *b, size_t n)
{
	do {
		u32 t = *(u32 *)(a + (n -= 4));
		*(u32 *)(a + n) = *(u32 *)(b + n);
		*(u32 *)(b + n) = t;
	} while (n);
}

/**
 * swap_words_64 - swap two elements in 64-bit chunks
 * @a: pointer to the first element to swap
 * @b: pointer to the second element to swap
 * @n: element size (must be a multiple of 8)
 *
 * Exchange the two objects in memory.  This exploits base+index
 * addressing, which basically all CPUs have, to minimize loop overhead
 * computations.
 *
 * We'd like to use 64-bit loads if possible.  If they're not, emulating
 * one requires base+index+4 addressing which x86 has but most other
 * processors do not.  If CONFIG_64BIT, we definitely have 64-bit loads,
 * but it's possible to have 64-bit loads without 64-bit pointers (e.g.
 * x32 ABI).  Are there any cases the kernel needs to worry about?
 */
static void swap_words_64(void *a, void *b, size_t n)
{
	do {
#ifdef CONFIG_64BIT
		u64 t = *(u64 *)(a + (n -= 8));
		*(u64 *)(a + n) = *(u64 *)(b + n);
		*(u64 *)(b + n) = t;
#else
		/* Use two 32-bit transfers to avoid base+index+4 addressing */
		u32 t = *(u32 *)(a + (n -= 4));
		*(u32 *)(a + n) = *(u32 *)(b + n);
		*(u32 *)(b + n) = t;

		t = *(u32 *)(a + (n -= 4));
		*(u32 *)(a + n) = *(u32 *)(b + n);
		*(u32 *)(b + n) = t;
#endif
	} while (n);
}

/**
 * swap_bytes - swap two elements a byte at a time
 * @a: pointer to the first element to swap
 * @b: pointer to the second element to swap
 * @n: element size
 *
 * This is the fallback if alignment doesn't allow using larger chunks.
 */
static void swap_bytes(void *a, void *b, size_t n)
{
	do {
		char t = ((char *)a)[--n];
		((char *)a)[n] = ((char *)b)[n];
		((char *)b)[n] = t;
	} while (n);
}

/*
 * The values are arbitrary as long as they can't be confused with
 * a pointer, but small integers make for the smallest compare
 * instructions.
 */
#define SWAP_WORDS_64 (swap_func_t)0
#define SWAP_WORDS_32 (swap_func_t)1
#define SWAP_BYTES (swap_func_t)2

/*
 * The function pointer is last to make tail calls most efficient if the
 * compiler decides not to inline this function.
 */
static void do_swap(void *a, void *b, size_t size, swap_func_t swap_func)
{
	if (swap_func == SWAP_WORDS_64)
		swap_words_64(a, b, size);
	else if (swap_func == SWAP_WORDS_32)
		swap_words_32(a, b, size);
	else if (swap_func == SWAP_BYTES)
		swap_bytes(a, b, size);
	else
		swap_func(a, b, (int)size);
}

#define _CMP_WRAPPER ((cmp_r_func_t)0L)

static int do_cmp(const void *a, const void *b, cmp_r_func_t cmp, const void *priv)
{
	if (cmp == _CMP_WRAPPER)
		return ((cmp_func_t)(priv))(a, b);
	return cmp(a, b, priv);
}

/**
 * parent - given the offset of the child, find the offset of the parent.
 * @i: the offset of the heap element whose parent is sought.  Non-zero.
 * @lsbit: a precomputed 1-bit mask, equal to "size & -size"
 * @size: size of each element
 *
 * In terms of array indexes, the parent of element j = @i/@size is simply
 * (j-1)/2.  But when working in byte offsets, we can't use implicit
 * truncation of integer divides.
 *
 * Fortunately, we only need one bit of the quotient, not the full divide.
 * @size has a least significant bit.  That bit will be clear if @i is
 * an even multiple of @size, and set if it's an odd multiple.
 *
 * Logically, we're doing "if (i & lsbit) i -= size;", but since the
 * branch is unpredictable, it's done with a bit of clever branch-free
 * code instead.
 */
__attribute_const__ __always_inline
static size_t parent(size_t i, unsigned int lsbit, size_t size)
{
	i -= size;
	i -= size & -(i & lsbit);
	return i / 2;
}

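/*
 * Worked example: with size = 12 (lsbit = 4), the children of array
 * index 2 (byte offset 24) are indices 5 and 6, at offsets 60 and 72.
 * parent(60, 4, 12): 60 - 12 = 48, and 48 & 4 == 0, so nothing more is
 * subtracted; 48 / 2 = 24.  parent(72, 4, 12): 72 - 12 = 60, 60 & 4 != 0,
 * so size is subtracted again giving 48; 48 / 2 = 24.  Both children map
 * back to offset 24, matching (j-1)/2 on the array indexes.
 */
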
/**
 * sort_r - sort an array of elements
 * @base: pointer to data to sort
 * @num: number of elements
 * @size: size of each element
 * @cmp_func: pointer to comparison function
 * @swap_func: pointer to swap function or NULL
 * @priv: third argument passed to comparison function
 *
 * This function does a heapsort on the given array.  You may provide
 * a swap_func function if you need to do something more than a memory
 * copy (e.g. fix up pointers or auxiliary data), but the built-in swap
 * avoids a slow retpoline and so is significantly faster.
 *
 * Sorting time is O(n log n) both on average and worst-case.  While
 * quicksort is slightly faster on average, it suffers from exploitable
 * O(n*n) worst-case behavior and extra memory requirements that make
 * it less suitable for kernel use.
 */
void sort_r(void *base, size_t num, size_t size,
	    cmp_r_func_t cmp_func,
	    swap_func_t swap_func,
	    const void *priv)
{
	/* pre-scale counters for performance */
	size_t n = num * size, a = (num/2) * size;
	const unsigned int lsbit = size & -size;  /* Used to find parent */

	if (!a)		/* num < 2 || size == 0 */
		return;

	if (!swap_func) {
		if (is_aligned(base, size, 8))
			swap_func = SWAP_WORDS_64;
		else if (is_aligned(base, size, 4))
			swap_func = SWAP_WORDS_32;
		else
			swap_func = SWAP_BYTES;
	}

	/*
	 * Loop invariants:
	 * 1. elements [a,n) satisfy the heap property (compare greater than
	 *    all of their children),
	 * 2. elements [n,num*size) are sorted, and
	 * 3. a <= b <= c <= d <= n (whenever they are valid).
	 */
	for (;;) {
		size_t b, c, d;

		if (a)			/* Building heap: sift down --a */
			a -= size;
		else if (n -= size)	/* Sorting: Extract root to --n */
			do_swap(base, base + n, size, swap_func);
		else			/* Sort complete */
			break;

		/*
		 * Sift element at "a" down into heap.  This is the
		 * "bottom-up" variant, which significantly reduces
		 * calls to cmp_func(): we find the sift-down path all
		 * the way to the leaves (one compare per level), then
		 * backtrack to find where to insert the target element.
		 *
		 * Because elements tend to sift down close to the leaves,
		 * this uses fewer compares than doing two per level
		 * on the way down.  (A bit more than half as many on
		 * average, 3/4 worst-case.)
		 */
		for (b = a; c = 2*b + size, (d = c + size) < n;)
			b = do_cmp(base + c, base + d, cmp_func, priv) >= 0 ? c : d;
		if (d == n)	/* Special case last leaf with no sibling */
			b = c;

		/* Now backtrack from "b" to the correct location for "a" */
		while (b != a && do_cmp(base + a, base + b, cmp_func, priv) >= 0)
			b = parent(b, lsbit, size);
		c = b;			/* Where "a" belongs */
		while (b != a) {	/* Shift it into place */
			b = parent(b, lsbit, size);
			do_swap(base + b, base + c, size, swap_func);
		}
	}
}
EXPORT_SYMBOL(sort_r);

void sort(void *base, size_t num, size_t size,
	  cmp_func_t cmp_func,
	  swap_func_t swap_func)
{
	return sort_r(base, num, size, _CMP_WRAPPER, swap_func, cmp_func);
}
EXPORT_SYMBOL(sort);
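
/*
 * Illustrative usage (hypothetical callers, not part of lib/sort.c).
 * The callbacks and helpers below exist only to show the calling
 * convention of sort() and sort_r().
 */
static int example_cmp_int(const void *a, const void *b)
{
	int x = *(const int *)a, y = *(const int *)b;

	return x < y ? -1 : x > y;
}

/* Passing a NULL swap_func selects the fast built-in word/byte swaps. */
static void example_sort_ints(int *vals, size_t count)
{
	sort(vals, count, sizeof(*vals), example_cmp_int, NULL);
}

/* sort_r(): order an index array by a key table passed through @priv. */
static int example_cmp_by_key(const void *a, const void *b, const void *priv)
{
	const u32 *keys = priv;
	u32 ka = keys[*(const unsigned int *)a];
	u32 kb = keys[*(const unsigned int *)b];

	return ka < kb ? -1 : ka > kb;
}

static void example_sort_by_key(unsigned int *idx, size_t count, const u32 *keys)
{
	sort_r(idx, count, sizeof(*idx), example_cmp_by_key, NULL, keys);
}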