/*
 * A fast, small, non-recursive O(n log n) sort for the Linux kernel
 *
 * Jan 23 2005  Matt Mackall <[email protected]>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/export.h>
#include <linux/sort.h>

/* true if unaligned accesses are cheap or base is aligned to align bytes */
static int alignment_ok(const void *base, int align)
{
	return IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
		((unsigned long)base & (align - 1)) == 0;
}

static void u32_swap(void *a, void *b, int size)
{
	u32 t = *(u32 *)a;
	*(u32 *)a = *(u32 *)b;
	*(u32 *)b = t;
}

static void u64_swap(void *a, void *b, int size)
{
	u64 t = *(u64 *)a;
	*(u64 *)a = *(u64 *)b;
	*(u64 *)b = t;
}

static void generic_swap(void *a, void *b, int size)
{
	char t;

	do {
		t = *(char *)a;
		*(char *)a++ = *(char *)b;
		*(char *)b++ = t;
	} while (--size > 0);
}

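/*
 * Note: the heap bookkeeping in sort() below is done in pre-scaled byte
 * offsets, not element indices. An element with index i lives at byte
 * offset r = i * size, so the children of i (indices 2*i + 1 and
 * 2*i + 2) live at offsets r * 2 + size and r * 2 + 2 * size; that is
 * where the "r * 2 + size" child computation in the loops comes from.
 */
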
/**
 * sort - sort an array of elements
 * @base: pointer to data to sort
 * @num: number of elements
 * @size: size of each element
 * @cmp_func: pointer to comparison function
 * @swap_func: pointer to swap function or NULL
 *
 * This function does a heapsort on the given array. You may provide a
 * swap_func optimized for your element type.
 *
 * Sorting time is O(n log n) both on average and worst-case. While
 * qsort is about 20% faster on average, it suffers from exploitable
 * O(n*n) worst-case behavior and extra memory requirements that make
 * it less suitable for kernel use.
 */

void sort(void *base, size_t num, size_t size,
	  int (*cmp_func)(const void *, const void *),
	  void (*swap_func)(void *, void *, int size))
{
	/* pre-scale counters for performance */
	int i = (num/2 - 1) * size, n = num * size, c, r;

	if (!swap_func) {
		if (size == 4 && alignment_ok(base, 4))
			swap_func = u32_swap;
		else if (size == 8 && alignment_ok(base, 8))
			swap_func = u64_swap;
		else
			swap_func = generic_swap;
	}

	/* heapify */
	for ( ; i >= 0; i -= size) {
		for (r = i; r * 2 + size < n; r = c) {
			c = r * 2 + size;
			if (c < n - size &&
					cmp_func(base + c, base + c + size) < 0)
				c += size;
			if (cmp_func(base + r, base + c) >= 0)
				break;
			swap_func(base + r, base + c, size);
		}
	}

	/* sort */
	for (i = n - size; i > 0; i -= size) {
		swap_func(base, base + i, size);
		for (r = 0; r * 2 + size < i; r = c) {
			c = r * 2 + size;
			if (c < i - size &&
					cmp_func(base + c, base + c + size) < 0)
				c += size;
			if (cmp_func(base + r, base + c) >= 0)
				break;
			swap_func(base + r, base + c, size);
		}
	}
}

EXPORT_SYMBOL(sort);
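
/*
 * A minimal usage sketch (illustrative, not part of this file):
 * sorting an array of ints with the default swap. cmp_int and
 * sort_example are hypothetical names; ARRAY_SIZE() comes from
 * <linux/kernel.h>. The cmp callback returns <0, 0, or >0, as with
 * libc qsort(). Passing NULL as swap_func lets sort() pick u32_swap,
 * assuming 4-byte ints at a 4-byte-aligned address.
 */
static int cmp_int(const void *a, const void *b)
{
	int x = *(const int *)a, y = *(const int *)b;

	return (x > y) - (x < y);	/* avoids overflow of x - y */
}

static void sort_example(void)
{
	int v[] = { 31, 4, 15, 9, 26 };

	sort(v, ARRAY_SIZE(v), sizeof(v[0]), cmp_int, NULL);
	/* v is now { 4, 9, 15, 26, 31 } */
}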