// SPDX-License-Identifier: GPL-2.0
/*
 * A fast, small, non-recursive O(n log n) sort for the Linux kernel
 *
 * This performs n*log2(n) + 0.37*n + o(n) comparisons on average,
 * and 1.5*n*log2(n) + O(n) in the (very contrived) worst case.
 *
 * Glibc qsort() manages n*log2(n) - 1.26*n for random inputs (1.63*n
 * better) at the expense of stack usage and much larger code to avoid
 * quicksort's O(n^2) worst case.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/export.h>
#include <linux/sort.h>

/**
 * is_aligned - is this pointer & size okay for word-wide copying?
 * @base: pointer to data
 * @size: size of each element
 * @align: required alignment (typically 4 or 8)
 *
 * Returns true if elements can be copied using word loads and stores.
 * The size must be a multiple of the alignment, and the base address
 * must be, too, unless we have CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS.
 *
 * For some reason, gcc doesn't know to optimize "if (a & mask || b & mask)"
 * to "if ((a | b) & mask)", so we do that by hand.
 */
__attribute_const__ __always_inline
static bool is_aligned(const void *base, size_t size, unsigned char align)
{
	unsigned char lsbits = (unsigned char)size;

	(void)base;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	lsbits |= (unsigned char)(uintptr_t)base;
#endif
	return (lsbits & (align - 1)) == 0;
}
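
/*
 * Illustrative only (not part of the original source): how the check
 * above behaves for a few concrete inputs, assuming a platform without
 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS:
 *
 *	is_aligned(p, 16, 8)	true iff p is 8-byte aligned
 *	is_aligned(p, 12, 8)	false (12 is not a multiple of 8)
 *	is_aligned(p, 12, 4)	true iff p is 4-byte aligned
 *
 * Folding the size and the address into one byte of low-order bits
 * means a single AND-plus-compare decides both conditions at once.
 */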

/**
 * swap_words_32 - swap two elements in 32-bit chunks
 * @a: pointer to the first element to swap
 * @b: pointer to the second element to swap
 * @n: element size (must be a multiple of 4)
 *
 * Exchange the two objects in memory. This exploits base+index addressing,
 * which basically all CPUs have, to minimize loop overhead computations.
 *
 * For some reason, on x86 gcc 7.3.0 adds a redundant test of n at the
 * bottom of the loop, even though the zero flag is still valid from the
 * subtract (since the intervening mov instructions don't alter the flags).
 * Gcc 8.1.0 doesn't have that problem.
 */
static void swap_words_32(void *a, void *b, size_t n)
{
	do {
		u32 t = *(u32 *)(a + (n -= 4));
		*(u32 *)(a + n) = *(u32 *)(b + n);
		*(u32 *)(b + n) = t;
	} while (n);
}

/**
 * swap_words_64 - swap two elements in 64-bit chunks
 * @a: pointer to the first element to swap
 * @b: pointer to the second element to swap
 * @n: element size (must be a multiple of 8)
 *
 * Exchange the two objects in memory. This exploits base+index
 * addressing, which basically all CPUs have, to minimize loop overhead
 * computations.
 *
 * We'd like to use 64-bit loads if possible. If they're not available,
 * emulating one requires base+index+4 addressing which x86 has but most
 * other processors do not. If CONFIG_64BIT, we definitely have 64-bit
 * loads, but it's possible to have 64-bit loads without 64-bit pointers
 * (e.g. x32 ABI). Are there any cases the kernel needs to worry about?
 */
static void swap_words_64(void *a, void *b, size_t n)
{
	do {
#ifdef CONFIG_64BIT
		u64 t = *(u64 *)(a + (n -= 8));
		*(u64 *)(a + n) = *(u64 *)(b + n);
		*(u64 *)(b + n) = t;
#else
		/* Use two 32-bit transfers to avoid base+index+4 addressing */
		u32 t = *(u32 *)(a + (n -= 4));
		*(u32 *)(a + n) = *(u32 *)(b + n);
		*(u32 *)(b + n) = t;

		t = *(u32 *)(a + (n -= 4));
		*(u32 *)(a + n) = *(u32 *)(b + n);
		*(u32 *)(b + n) = t;
#endif
	} while (n);
}

/**
 * swap_bytes - swap two elements a byte at a time
 * @a: pointer to the first element to swap
 * @b: pointer to the second element to swap
 * @n: element size
 *
 * This is the fallback if alignment doesn't allow using larger chunks.
 */
static void swap_bytes(void *a, void *b, size_t n)
{
	do {
		char t = ((char *)a)[--n];
		((char *)a)[n] = ((char *)b)[n];
		((char *)b)[n] = t;
	} while (n);
}

/*
 * The values are arbitrary as long as they can't be confused with
 * a pointer, but small integers make for the smallest compare
 * instructions.
 */
#define SWAP_WORDS_64 (swap_func_t)0
#define SWAP_WORDS_32 (swap_func_t)1
#define SWAP_BYTES (swap_func_t)2

/*
 * The function pointer is last to make tail calls most efficient if the
 * compiler decides not to inline this function.
 */
static void do_swap(void *a, void *b, size_t size, swap_func_t swap_func)
{
	if (swap_func == SWAP_WORDS_64)
		swap_words_64(a, b, size);
	else if (swap_func == SWAP_WORDS_32)
		swap_words_32(a, b, size);
	else if (swap_func == SWAP_BYTES)
		swap_bytes(a, b, size);
	else
		swap_func(a, b, (int)size);
}

#define _CMP_WRAPPER ((cmp_r_func_t)0L)

static int do_cmp(const void *a, const void *b, cmp_r_func_t cmp, const void *priv)
{
	if (cmp == _CMP_WRAPPER)
		return ((cmp_func_t)(priv))(a, b);
	return cmp(a, b, priv);
}
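
/*
 * A sketch (not in the original source) of the dispatch above: sort()
 * at the bottom of this file smuggles a legacy two-argument comparator
 * through the @priv slot, with _CMP_WRAPPER as a sentinel in the
 * comparator slot. With a hypothetical comparator my_cmp:
 *
 *	sort(base, num, size, my_cmp, NULL)
 *	  -> sort_r(base, num, size, _CMP_WRAPPER, NULL, my_cmp)
 *	    -> do_cmp(a, b, _CMP_WRAPPER, my_cmp)
 *	      -> ((cmp_func_t)my_cmp)(a, b)
 *
 * Like the swap sentinels, comparing against a small constant is cheap
 * and avoids routing every compare through an extra wrapper function.
 */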

/**
 * parent - given the offset of the child, find the offset of the parent.
 * @i: the offset of the heap element whose parent is sought. Non-zero.
 * @lsbit: a precomputed 1-bit mask, equal to "size & -size"
 * @size: size of each element
 *
 * In terms of array indexes, the parent of element j = @i/@size is simply
 * (j-1)/2. But when working in byte offsets, we can't use implicit
 * truncation of integer divides.
 *
 * Fortunately, we only need one bit of the quotient, not the full divide.
 * @size has a least significant bit. That bit will be clear if @i is
 * an even multiple of @size, and set if it's an odd multiple.
 *
 * Logically, we're doing "if (i & lsbit) i -= size;", but since the
 * branch is unpredictable, it's done with a bit of clever branch-free
 * code instead.
 */
__attribute_const__ __always_inline
static size_t parent(size_t i, unsigned int lsbit, size_t size)
{
	i -= size;
	i -= size & -(i & lsbit);
	return i / 2;
}
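
/*
 * Worked example (illustrative only): with size = 12,
 * lsbit = size & -size = 4. Take the child at byte offset i = 72,
 * i.e. array index 6, whose parent is index (6-1)/2 = 2, byte
 * offset 24:
 *
 *	i -= size;			72 -> 60
 *	i & lsbit			60 & 4 != 0, so the second
 *	i -= size & -(i & lsbit);	subtraction fires: 60 -> 48
 *	return i / 2;			48 / 2 = 24, as required
 *
 * For a child at an even multiple (e.g. i = 60, index 5), the second
 * subtraction is a no-op: 60 - 12 = 48, 48 & 4 == 0, and 48/2 = 24
 * is again the offset of index 2.
 */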

/**
 * sort_r - sort an array of elements
 * @base: pointer to data to sort
 * @num: number of elements
 * @size: size of each element
 * @cmp_func: pointer to comparison function
 * @swap_func: pointer to swap function or NULL
 * @priv: third argument passed to comparison function
 *
 * This function does a heapsort on the given array. You may provide
 * a swap_func function if you need to do something more than a memory
 * copy (e.g. fix up pointers or auxiliary data), but the built-in swap
 * avoids a slow retpoline and so is significantly faster.
 *
 * Sorting time is O(n log n) both on average and worst-case. While
 * quicksort is slightly faster on average, it suffers from exploitable
 * O(n*n) worst-case behavior and extra memory requirements that make
 * it less suitable for kernel use.
 */
void sort_r(void *base, size_t num, size_t size,
	    cmp_r_func_t cmp_func,
	    swap_func_t swap_func,
	    const void *priv)
{
	/* pre-scale counters for performance */
	size_t n = num * size, a = (num/2) * size;
	const unsigned int lsbit = size & -size;	/* Used to find parent */

	if (!a)		/* num < 2 || size == 0 */
		return;

	if (!swap_func) {
		if (is_aligned(base, size, 8))
			swap_func = SWAP_WORDS_64;
		else if (is_aligned(base, size, 4))
			swap_func = SWAP_WORDS_32;
		else
			swap_func = SWAP_BYTES;
	}

	/*
	 * Loop invariants:
	 * 1. elements [a,n) satisfy the heap property (compare greater than
	 *    all of their children),
	 * 2. elements [n,num*size) are sorted, and
	 * 3. a <= b <= c <= d <= n (whenever they are valid).
	 */
	for (;;) {
		size_t b, c, d;

		if (a)			/* Building heap: sift down --a */
			a -= size;
		else if (n -= size)	/* Sorting: Extract root to --n */
			do_swap(base, base + n, size, swap_func);
		else			/* Sort complete */
			break;

		/*
		 * Sift element at "a" down into heap. This is the
		 * "bottom-up" variant, which significantly reduces
		 * calls to cmp_func(): we find the sift-down path all
		 * the way to the leaves (one compare per level), then
		 * backtrack to find where to insert the target element.
		 *
		 * Because elements tend to sift down close to the leaves,
		 * this uses fewer compares than doing two per level
		 * on the way down. (A bit more than half as many on
		 * average, 3/4 worst-case.)
		 */
		for (b = a; c = 2*b + size, (d = c + size) < n;)
			b = do_cmp(base + c, base + d, cmp_func, priv) >= 0 ? c : d;
		if (d == n)	/* Special case last leaf with no sibling */
			b = c;

		/* Now backtrack from "b" to the correct location for "a" */
		while (b != a && do_cmp(base + a, base + b, cmp_func, priv) >= 0)
			b = parent(b, lsbit, size);
		c = b;			/* Where "a" belongs */
		while (b != a) {	/* Shift it into place */
			b = parent(b, lsbit, size);
			do_swap(base + b, base + c, size, swap_func);
		}
	}
}
EXPORT_SYMBOL(sort_r);
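
/*
 * A minimal usage sketch (not part of this file): sort_r() lets the
 * comparator take a context pointer, e.g. to sort an index array by an
 * external key table. The names cmp_by_key, idx, keys and n here are
 * hypothetical:
 *
 *	static int cmp_by_key(const void *a, const void *b, const void *priv)
 *	{
 *		const int *keys = priv;
 *		int ka = keys[*(const unsigned int *)a];
 *		int kb = keys[*(const unsigned int *)b];
 *
 *		return (ka > kb) - (ka < kb);
 *	}
 *
 *	sort_r(idx, n, sizeof(*idx), cmp_by_key, NULL, keys);
 */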

void sort(void *base, size_t num, size_t size,
	  cmp_func_t cmp_func,
	  swap_func_t swap_func)
{
	return sort_r(base, num, size, _CMP_WRAPPER, swap_func, cmp_func);
}
EXPORT_SYMBOL(sort);
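
/*
 * A minimal usage sketch (not part of this file), with a hypothetical
 * comparator cmp_int; the subtraction-free return avoids signed
 * overflow on extreme values:
 *
 *	static int cmp_int(const void *a, const void *b)
 *	{
 *		int x = *(const int *)a, y = *(const int *)b;
 *
 *		return (x > y) - (x < y);
 *	}
 *
 *	int arr[] = { 3, 1, 2 };
 *	sort(arr, ARRAY_SIZE(arr), sizeof(arr[0]), cmp_int, NULL);
 *
 * arr is now { 1, 2, 3 }. Passing NULL for swap_func selects one of
 * the built-in word-wide swaps above, since sizeof(int) is 4-aligned.
 */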