// SPDX-License-Identifier: GPL-2.0
/*
 * A fast, small, non-recursive O(n log n) sort for the Linux kernel
 *
 * Jan 23 2005  Matt Mackall <mpm@selenic.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/export.h>
#include <linux/sort.h>

static int alignment_ok(const void *base, int align)
{
	return IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
		((unsigned long)base & (align - 1)) == 0;
}

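/*
 * Illustration, not part of the original file: in alignment_ok(),
 * align - 1 masks the low address bits, e.g. with align == 4 a base
 * of 0x1004 passes while 0x1006 fails on architectures where
 * unaligned word access is slow or faulting.
 */
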
static void u32_swap(void *a, void *b, int size)
{
	u32 t = *(u32 *)a;
	*(u32 *)a = *(u32 *)b;
	*(u32 *)b = t;
}

static void u64_swap(void *a, void *b, int size)
{
	u64 t = *(u64 *)a;
	*(u64 *)a = *(u64 *)b;
	*(u64 *)b = t;
}

/*
 * Fallback: swap one byte at a time; correct for any element size
 * and alignment, but slower than the word-sized helpers above.
 */
static void generic_swap(void *a, void *b, int size)
{
	char t;

	do {
		t = *(char *)a;
		*(char *)a++ = *(char *)b;
		*(char *)b++ = t;
	} while (--size > 0);
}

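/*
 * Sketch, not part of the original file: a caller-supplied swap_func
 * optimized for a 16-byte element type, as the sort() kerneldoc below
 * suggests.  The names struct pair and pair_swap are hypothetical.
 */
#if 0
struct pair {
	u64 key;
	u64 value;
};

static void pair_swap(void *a, void *b, int size)
{
	struct pair t = *(struct pair *)a;

	*(struct pair *)a = *(struct pair *)b;
	*(struct pair *)b = t;
}
#endif
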
/**
 * sort - sort an array of elements
 * @base: pointer to data to sort
 * @num: number of elements
 * @size: size of each element
 * @cmp_func: pointer to comparison function
 * @swap_func: pointer to swap function or NULL
 *
 * This function does a heapsort on the given array. You may provide
 * a swap_func optimized to your element type.
 *
 * Sorting time is O(n log n) both on average and worst-case. While
 * qsort is about 20% faster on average, it suffers from exploitable
 * O(n*n) worst-case behavior and extra memory requirements that make
 * it less suitable for kernel use.
 */

void sort(void *base, size_t num, size_t size,
	  int (*cmp_func)(const void *, const void *),
	  void (*swap_func)(void *, void *, int size))
{
	/* pre-scale counters for performance: i, n, c, r are byte offsets */
	int i = (num/2 - 1) * size, n = num * size, c, r;

	if (!swap_func) {
		if (size == 4 && alignment_ok(base, 4))
			swap_func = u32_swap;
		else if (size == 8 && alignment_ok(base, 8))
			swap_func = u64_swap;
		else
			swap_func = generic_swap;
	}

	/*
	 * heapify: build a max-heap in place.  With byte offsets, the
	 * left child of the element at offset r is at 2*r + size
	 * (element index 2*k + 1).
	 */
	for ( ; i >= 0; i -= size) {
		for (r = i; r * 2 + size < n; r = c) {
			c = r * 2 + size;
			if (c < n - size &&
					cmp_func(base + c, base + c + size) < 0)
				c += size;
			if (cmp_func(base + r, base + c) >= 0)
				break;
			swap_func(base + r, base + c, size);
		}
	}

	/*
	 * sort: repeatedly swap the heap maximum to the end of the
	 * shrinking unsorted region, then sift the new root down.
	 */
	for (i = n - size; i > 0; i -= size) {
		swap_func(base, base + i, size);
		for (r = 0; r * 2 + size < i; r = c) {
			c = r * 2 + size;
			if (c < i - size &&
					cmp_func(base + c, base + c + size) < 0)
				c += size;
			if (cmp_func(base + r, base + c) >= 0)
				break;
			swap_func(base + r, base + c, size);
		}
	}
}

EXPORT_SYMBOL(sort);
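
/*
 * Sketch, not part of the original file: a minimal caller of sort().
 * cmp_int() and sort_demo() are hypothetical names.
 */
#if 0
static int cmp_int(const void *a, const void *b)
{
	int x = *(const int *)a;
	int y = *(const int *)b;

	/* Avoid subtraction, which can overflow for extreme values. */
	return (x > y) - (x < y);
}

static void sort_demo(void)
{
	int v[] = { 3, 1, 4, 1, 5, 9, 2, 6 };

	/*
	 * With a NULL swap_func, sort() selects u32_swap here: the
	 * element size is 4 and the array is naturally aligned.
	 */
	sort(v, sizeof(v) / sizeof(v[0]), sizeof(v[0]), cmp_int, NULL);
}
#endif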