// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2021-2023 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "scrub/xfile.h"
#include "scrub/xfarray.h"
#include "scrub/scrub.h"
#include "scrub/trace.h"
/*
 * Large Arrays of Fixed-Size Records
 * ==================================
 *
 * This memory array uses an xfile (which itself is a memfd "file") to store
 * large numbers of fixed-size records in memory that can be paged out.  This
 * puts less stress on the memory reclaim algorithms during an online repair
 * because we don't have to pin so much memory.  However, array access is less
 * direct than would be in a regular memory array.  Access to the array is
 * performed via indexed load and store methods, and an append method is
 * provided for convenience.  Array elements can be unset, which sets them to
 * all zeroes.  Unset entries are skipped during iteration, though direct loads
 * will return a zeroed buffer.  Callers are responsible for concurrency
 * control.
 */
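/*
 * Hedged usage sketch; not part of the original file.  It shows the
 * create/append/load/destroy lifecycle described above.  "struct my_rec" and
 * the demo function are hypothetical; xfarray_append() is assumed to be the
 * static inline helper in scrub/xfarray.h that stores at index array->nr.
 */
struct my_rec {
	uint64_t	key;	/* hypothetical payload */
};

static inline int my_xfarray_demo(void)
{
	struct my_rec	rec = { .key = 1 };	/* must not be all zeroes */
	struct xfarray	*array;
	int		error;

	error = xfarray_create("demo", 0, sizeof(struct my_rec), &array);
	if (error)
		return error;

	error = xfarray_append(array, &rec);	/* stores at index 0 */
	if (!error)
		error = xfarray_load(array, 0, &rec);

	xfarray_destroy(array);
	return error;
}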
/*
 * Pointer to scratch space.  Because we can't access the xfile data directly,
 * we allocate a small amount of memory on the end of the xfarray structure to
 * buffer array items when we need space to store values temporarily.
 */
static inline void *xfarray_scratch(struct xfarray *array)
{
	return (array + 1);
}
/* Compute array index given an xfile offset. */
static xfarray_idx_t
xfarray_idx(
	struct xfarray	*array,
	loff_t		pos)
{
	if (array->obj_size_log >= 0)
		return (xfarray_idx_t)pos >> array->obj_size_log;

	return div_u64((xfarray_idx_t)pos, array->obj_size);
}
/* Compute xfile offset of array element. */
static inline loff_t xfarray_pos(struct xfarray *array, xfarray_idx_t idx)
{
	if (array->obj_size_log >= 0)
		return idx << array->obj_size_log;

	return idx * array->obj_size;
}
/*
 * Initialize a big memory array.  Array records cannot be larger than a
 * page, and the array cannot span more bytes than the page cache supports.
 * If @required_capacity is nonzero, the maximum array size will be set to
 * this quantity and the array creation will fail if the underlying storage
 * cannot support that many records.
 */
int
xfarray_create(
	const char		*description,
	unsigned long long	required_capacity,
	size_t			obj_size,
	struct xfarray		**arrayp)
{
	struct xfarray		*array;
	struct xfile		*xfile;
	int			error;

	ASSERT(obj_size < PAGE_SIZE);

	error = xfile_create(description, 0, &xfile);
	if (error)
		return error;

	error = -ENOMEM;
	array = kzalloc(sizeof(struct xfarray) + obj_size, XCHK_GFP_FLAGS);
	if (!array)
		goto out_xfile;

	array->xfile = xfile;
	array->obj_size = obj_size;

	if (is_power_of_2(obj_size))
		array->obj_size_log = ilog2(obj_size);
	else
		array->obj_size_log = -1;

	array->max_nr = xfarray_idx(array, MAX_LFS_FILESIZE);
	trace_xfarray_create(array, required_capacity);

	if (required_capacity > 0) {
		if (array->max_nr < required_capacity) {
			error = -ENOMEM;
			goto out_xfarray;
		}
		array->max_nr = required_capacity;
	}

	*arrayp = array;
	return 0;

out_xfarray:
	kfree(array);
out_xfile:
	xfile_destroy(xfile);
	return error;
}
/* Destroy the array. */
void
xfarray_destroy(
	struct xfarray	*array)
{
	xfile_destroy(array->xfile);
	kfree(array);
}
/* Load an element from the array. */
int
xfarray_load(
	struct xfarray	*array,
	xfarray_idx_t	idx,
	void		*ptr)
{
	if (idx >= array->nr)
		return -ENODATA;

	return xfile_obj_load(array->xfile, ptr, array->obj_size,
			xfarray_pos(array, idx));
}
/* Is this array element potentially unset? */
static bool
xfarray_is_unset(
	struct xfarray	*array,
	loff_t		pos)
{
	void		*temp = xfarray_scratch(array);
	int		error;

	if (array->unset_slots == 0)
		return false;

	error = xfile_obj_load(array->xfile, temp, array->obj_size, pos);
	if (!error && xfarray_element_is_null(array, temp))
		return true;

	return false;
}
/*
 * Unset an array element.  If @idx is the last element in the array, the
 * array will be truncated.  Otherwise, the entry will be zeroed.
 */
int
xfarray_unset(
	struct xfarray	*array,
	xfarray_idx_t	idx)
{
	void		*temp = xfarray_scratch(array);
	loff_t		pos = xfarray_pos(array, idx);
	int		error;

	if (idx >= array->nr)
		return -ENODATA;

	if (idx == array->nr - 1) {
		array->nr--;
		return 0;
	}

	if (xfarray_is_unset(array, pos))
		return 0;

	memset(temp, 0, array->obj_size);
	error = xfile_obj_store(array->xfile, temp, array->obj_size, pos);
	if (error)
		return error;

	array->unset_slots++;
	return 0;
}
/*
 * Store an element in the array.  The element must not be completely zeroed,
 * because those are considered unset sparse elements.
 */
int
xfarray_store(
	struct xfarray	*array,
	xfarray_idx_t	idx,
	const void	*ptr)
{
	int		ret;

	if (idx >= array->max_nr)
		return -EFBIG;

	ASSERT(!xfarray_element_is_null(array, ptr));

	ret = xfile_obj_store(array->xfile, ptr, array->obj_size,
			xfarray_pos(array, idx));
	if (ret)
		return ret;

	array->nr = max(array->nr, idx + 1);
	return 0;
}
/* Is this array element NULL? */
bool
xfarray_element_is_null(
	struct xfarray	*array,
	const void	*ptr)
{
	return !memchr_inv(ptr, 0, array->obj_size);
}
/*
 * Store an element anywhere in the array that is unset.  If there are no
 * unset slots, append the element to the array.
 */
int
xfarray_store_anywhere(
	struct xfarray	*array,
	const void	*ptr)
{
	void		*temp = xfarray_scratch(array);
	loff_t		endpos = xfarray_pos(array, array->nr);
	loff_t		pos;
	int		error;

	/* Find an unset slot to put it in. */
	for (pos = 0;
	     pos < endpos && array->unset_slots > 0;
	     pos += array->obj_size) {
		error = xfile_obj_load(array->xfile, temp, array->obj_size,
				pos);
		if (error || !xfarray_element_is_null(array, temp))
			continue;

		error = xfile_obj_store(array->xfile, ptr, array->obj_size,
				pos);
		if (error)
			return error;

		array->unset_slots--;
		return 0;
	}

	/* No unset slots found; attach it on the end. */
	array->unset_slots = 0;
	return xfarray_append(array, ptr);
}
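/*
 * Hedged usage sketch; not part of the original file.  Unsetting a middle
 * element leaves an all-zero hole that a later _store_anywhere call may fill
 * instead of growing the array.  Reuses the hypothetical struct my_rec from
 * the first sketch.
 */
static inline int my_reuse_demo(struct xfarray *array, const struct my_rec *rec)
{
	int	error;

	error = xfarray_unset(array, 0);	/* punch a hole at index 0 */
	if (error)
		return error;

	/* Lands in the hole at index 0 if one exists, else appends. */
	return xfarray_store_anywhere(array, rec);
}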
/* Return length of array. */
uint64_t
xfarray_length(
	struct xfarray	*array)
{
	return array->nr;
}
/*
 * Decide which array item we're going to read as part of an _iter_get.
 * @cur is the array index, and @pos is the file offset of that array index in
 * the backing xfile.  Returns ENODATA if we reach the end of the records.
 *
 * Reading from a hole in a sparse xfile causes page instantiation, so for
 * iterating a (possibly sparse) array we need to figure out if the cursor is
 * pointing at a totally uninitialized hole and move the cursor up if
 * necessary.
 */
static inline int
xfarray_find_data(
	struct xfarray	*array,
	xfarray_idx_t	*cur,
	loff_t		*pos)
{
	unsigned int	pgoff = offset_in_page(*pos);
	loff_t		end_pos = *pos + array->obj_size - 1;
	loff_t		new_pos;

	/*
	 * If the current array record is not adjacent to a page boundary, we
	 * are in the middle of the page.  We do not need to move the cursor.
	 */
	if (pgoff != 0 && pgoff + array->obj_size - 1 < PAGE_SIZE)
		return 0;

	/*
	 * Call SEEK_DATA on the last byte in the record we're about to read.
	 * If the record ends at (or crosses) the end of a page then we know
	 * that the first byte of the record is backed by pages and don't need
	 * to query it.  If instead the record begins at the start of the page
	 * then we know that querying the last byte is just as good as querying
	 * the first byte, since records cannot be larger than a page.
	 *
	 * If the call returns the same file offset, we know this record is
	 * backed by real pages.  We do not need to move the cursor.
	 */
	new_pos = xfile_seek_data(array->xfile, end_pos);
	if (new_pos == -ENXIO)
		return -ENODATA;
	if (new_pos < 0)
		return new_pos;
	if (new_pos == end_pos)
		return 0;

	/*
	 * Otherwise, SEEK_DATA told us how far up to move the file pointer to
	 * find more data.  Move the array index to the first record past the
	 * byte offset we were given.
	 */
	new_pos = roundup_64(new_pos, array->obj_size);
	*cur = xfarray_idx(array, new_pos);
	*pos = xfarray_pos(array, *cur);
	return 0;
}
/*
 * Starting at *idx, fetch the next non-null array entry and advance the index
 * to set up the next _load_next call.  Returns ENODATA if we reach the end of
 * the array.  Callers must set @*idx to XFARRAY_CURSOR_INIT before the first
 * call to this function.
 */
int
xfarray_load_next(
	struct xfarray	*array,
	xfarray_idx_t	*idx,
	void		*rec)
{
	xfarray_idx_t	cur = *idx;
	loff_t		pos = xfarray_pos(array, cur);
	int		error;

	do {
		if (cur >= array->nr)
			return -ENODATA;

		/*
		 * Ask the backing store for the location of the next possible
		 * written record, then retrieve that record.
		 */
		error = xfarray_find_data(array, &cur, &pos);
		if (error)
			return error;
		error = xfarray_load(array, cur, rec);
		if (error)
			return error;

		cur++;
		pos += array->obj_size;
	} while (xfarray_element_is_null(array, rec));

	*idx = cur;
	return 0;
}
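/*
 * Hedged usage sketch; not part of the original file.  This is the iteration
 * protocol described above: start the cursor at XFARRAY_CURSOR_INIT and loop
 * until -ENODATA.  Reuses the hypothetical struct my_rec from the first
 * sketch.
 */
static inline int my_iterate_demo(struct xfarray *array)
{
	xfarray_idx_t	cur = XFARRAY_CURSOR_INIT;
	struct my_rec	rec;
	int		error;

	while ((error = xfarray_load_next(array, &cur, &rec)) == 0) {
		/* process @rec here; null (all-zero) records were skipped */
	}
	if (error == -ENODATA)
		return 0;	/* clean end of array */
	return error;
}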
/* Sorting functions */

#ifdef DEBUG
# define xfarray_sort_bump_loads(si)	do { (si)->loads++; } while (0)
# define xfarray_sort_bump_stores(si)	do { (si)->stores++; } while (0)
# define xfarray_sort_bump_compares(si)	do { (si)->compares++; } while (0)
# define xfarray_sort_bump_heapsorts(si) do { (si)->heapsorts++; } while (0)
#else
# define xfarray_sort_bump_loads(si)
# define xfarray_sort_bump_stores(si)
# define xfarray_sort_bump_compares(si)
# define xfarray_sort_bump_heapsorts(si)
#endif /* DEBUG */
/* Load an array element for sorting. */
static inline int
xfarray_sort_load(
	struct xfarray_sortinfo	*si,
	xfarray_idx_t		idx,
	void			*ptr)
{
	xfarray_sort_bump_loads(si);
	return xfarray_load(si->array, idx, ptr);
}
/* Store an array element for sorting. */
static inline int
xfarray_sort_store(
	struct xfarray_sortinfo	*si,
	xfarray_idx_t		idx,
	void			*ptr)
{
	xfarray_sort_bump_stores(si);
	return xfarray_store(si->array, idx, ptr);
}
/* Compare an array element for sorting. */
static inline int
xfarray_sort_cmp(
	struct xfarray_sortinfo	*si,
	const void		*a,
	const void		*b)
{
	xfarray_sort_bump_compares(si);
	return si->cmp_fn(a, b);
}
/* Return a pointer to the low index stack for quicksort partitioning. */
static inline xfarray_idx_t *xfarray_sortinfo_lo(struct xfarray_sortinfo *si)
{
	return (xfarray_idx_t *)(si + 1);
}
/* Return a pointer to the high index stack for quicksort partitioning. */
static inline xfarray_idx_t *xfarray_sortinfo_hi(struct xfarray_sortinfo *si)
{
	return xfarray_sortinfo_lo(si) + si->max_stack_depth;
}
/* Size of each element in the quicksort pivot array. */
static inline size_t
xfarray_pivot_rec_sz(
	struct xfarray	*array)
{
	return round_up(array->obj_size, 8) + sizeof(xfarray_idx_t);
}
/* Allocate memory to handle the sort. */
static inline int
xfarray_sortinfo_alloc(
	struct xfarray		*array,
	xfarray_cmp_fn		cmp_fn,
	unsigned int		flags,
	struct xfarray_sortinfo	**infop)
{
	struct xfarray_sortinfo	*si;
	size_t			nr_bytes = sizeof(struct xfarray_sortinfo);
	size_t			pivot_rec_sz = xfarray_pivot_rec_sz(array);
	int			max_stack_depth;

	/*
	 * The median-of-nine pivot algorithm doesn't work if a subset has
	 * fewer than 9 items.  Make sure the in-memory sort will always take
	 * over for subsets where this wouldn't be the case.
	 */
	BUILD_BUG_ON(XFARRAY_QSORT_PIVOT_NR >= XFARRAY_ISORT_NR);

	/*
	 * Tail-call recursion during the partitioning phase means that
	 * quicksort will never recurse more than log2(nr) times.  We need one
	 * extra level of stack to hold the initial parameters.  In-memory
	 * sort will always take care of the last few levels of recursion for
	 * us, so we can reduce the stack depth by that much.
	 */
	max_stack_depth = ilog2(array->nr) + 1 - (XFARRAY_ISORT_SHIFT - 1);
	if (max_stack_depth < 1)
		max_stack_depth = 1;

	/* Each level of quicksort uses a lo and a hi index */
	nr_bytes += max_stack_depth * sizeof(xfarray_idx_t) * 2;

	/* Scratchpad for in-memory sort, or finding the pivot */
	nr_bytes += max_t(size_t,
			(XFARRAY_QSORT_PIVOT_NR + 1) * pivot_rec_sz,
			XFARRAY_ISORT_NR * array->obj_size);

	si = kvzalloc(nr_bytes, XCHK_GFP_FLAGS);
	if (!si)
		return -ENOMEM;

	si->array = array;
	si->cmp_fn = cmp_fn;
	si->flags = flags;
	si->max_stack_depth = max_stack_depth;
	si->max_stack_used = 1;

	xfarray_sortinfo_lo(si)[0] = 0;
	xfarray_sortinfo_hi(si)[0] = array->nr - 1;

	trace_xfarray_sort(si, nr_bytes);
	*infop = si;
	return 0;
}
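/*
 * Layout sketch for the allocation above; an inference from the helper
 * functions, not part of the original file.  The sortinfo buffer is one
 * contiguous allocation:
 *
 *   struct xfarray_sortinfo
 *   xfarray_idx_t lo[max_stack_depth]    <- xfarray_sortinfo_lo()
 *   xfarray_idx_t hi[max_stack_depth]    <- xfarray_sortinfo_hi()
 *   scratchpad                           <- isort scratch / pivot storage
 */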
/* Should this sort be terminated by a fatal signal? */
static inline bool
xfarray_sort_terminated(
	struct xfarray_sortinfo	*si,
	int			*error)
{
	/*
	 * If preemption is disabled, we need to yield to the scheduler every
	 * few seconds so that we don't run afoul of the soft lockup watchdog
	 * or RCU stall detector.
	 */
	cond_resched();

	if ((si->flags & XFARRAY_SORT_KILLABLE) &&
	    fatal_signal_pending(current)) {
		if (*error == 0)
			*error = -EINTR;
		return true;
	}
	return false;
}
/* Do we want an in-memory sort? */
static inline bool
xfarray_want_isort(
	struct xfarray_sortinfo *si,
	xfarray_idx_t		start,
	xfarray_idx_t		end)
{
	/*
	 * For array subsets that fit in the scratchpad, it's much faster to
	 * use the kernel's heapsort than quicksort's stack machine.
	 */
	return (end - start) < XFARRAY_ISORT_NR;
}
/* Return the scratch space within the sortinfo structure. */
static inline void *xfarray_sortinfo_isort_scratch(struct xfarray_sortinfo *si)
{
	return xfarray_sortinfo_hi(si) + si->max_stack_depth;
}
/*
 * Sort a small number of array records using scratchpad memory.  The records
 * need not be contiguous in the xfile's memory pages.
 */
STATIC int
xfarray_isort(
	struct xfarray_sortinfo	*si,
	xfarray_idx_t		lo,
	xfarray_idx_t		hi)
{
	void			*scratch = xfarray_sortinfo_isort_scratch(si);
	loff_t			lo_pos = xfarray_pos(si->array, lo);
	loff_t			len = xfarray_pos(si->array, hi - lo + 1);
	int			error;

	trace_xfarray_isort(si, lo, hi);

	xfarray_sort_bump_loads(si);
	error = xfile_obj_load(si->array->xfile, scratch, len, lo_pos);
	if (error)
		return error;

	xfarray_sort_bump_heapsorts(si);
	sort(scratch, hi - lo + 1, si->array->obj_size, si->cmp_fn, NULL);

	xfarray_sort_bump_stores(si);
	return xfile_obj_store(si->array->xfile, scratch, len, lo_pos);
}
/* Grab a page for sorting records. */
static inline int
xfarray_sort_get_page(
	struct xfarray_sortinfo	*si,
	loff_t			pos,
	uint64_t		len)
{
	int			error;

	error = xfile_get_page(si->array->xfile, pos, len, &si->xfpage);
	if (error)
		return error;

	/*
	 * xfile pages must never be mapped into userspace, so we skip the
	 * dcache flush when mapping the page.
	 */
	si->page_kaddr = kmap_local_page(si->xfpage.page);
	return 0;
}
/* Release a page we grabbed for sorting records. */
static inline int
xfarray_sort_put_page(
	struct xfarray_sortinfo	*si)
{
	if (!si->page_kaddr)
		return 0;

	kunmap_local(si->page_kaddr);
	si->page_kaddr = NULL;

	return xfile_put_page(si->array->xfile, &si->xfpage);
}
/* Decide if these records are eligible for in-page sorting. */
static inline bool
xfarray_want_pagesort(
	struct xfarray_sortinfo	*si,
	xfarray_idx_t		lo,
	xfarray_idx_t		hi)
{
	pgoff_t			lo_page;
	pgoff_t			hi_page;
	loff_t			end_pos;

	/* We can only map one page at a time. */
	lo_page = xfarray_pos(si->array, lo) >> PAGE_SHIFT;
	end_pos = xfarray_pos(si->array, hi) + si->array->obj_size - 1;
	hi_page = end_pos >> PAGE_SHIFT;

	return lo_page == hi_page;
}
/* Sort a bunch of records that all live in the same memory page. */
STATIC int
xfarray_pagesort(
	struct xfarray_sortinfo	*si,
	xfarray_idx_t		lo,
	xfarray_idx_t		hi)
{
	void			*startp;
	loff_t			lo_pos = xfarray_pos(si->array, lo);
	uint64_t		len = xfarray_pos(si->array, hi - lo);
	int			error;

	trace_xfarray_pagesort(si, lo, hi);

	xfarray_sort_bump_loads(si);
	error = xfarray_sort_get_page(si, lo_pos, len);
	if (error)
		return error;

	xfarray_sort_bump_heapsorts(si);
	startp = si->page_kaddr + offset_in_page(lo_pos);
	sort(startp, hi - lo + 1, si->array->obj_size, si->cmp_fn, NULL);

	xfarray_sort_bump_stores(si);
	return xfarray_sort_put_page(si);
}
/* Return a pointer to the xfarray pivot record within the sortinfo struct. */
static inline void *xfarray_sortinfo_pivot(struct xfarray_sortinfo *si)
{
	return xfarray_sortinfo_hi(si) + si->max_stack_depth;
}
/* Return a pointer to the start of the pivot array. */
static inline void *
xfarray_sortinfo_pivot_array(
	struct xfarray_sortinfo	*si)
{
	return xfarray_sortinfo_pivot(si) + si->array->obj_size;
}
/* The xfarray record is stored at the start of each pivot array element. */
static inline void *
xfarray_pivot_array_rec(
	void		*pa,
	size_t		pa_recsz,
	unsigned int	pa_idx)
{
	return pa + (pa_recsz * pa_idx);
}
671 /* The xfarray index is stored at the end of each pivot array element. */
672 static inline xfarray_idx_t *
673 xfarray_pivot_array_idx(
678 return xfarray_pivot_array_rec(pa, pa_recsz, pa_idx + 1) -
679 sizeof(xfarray_idx_t);
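/*
 * Layout sketch implied by the two helpers above; an inference, not part of
 * the original file.  Each pivot array element packs a sampled record and the
 * xfarray index it was sampled from, with the record padded to an 8-byte
 * multiple (see xfarray_pivot_rec_sz):
 *
 *   [ record (obj_size, padded) | xfarray_idx_t ]
 */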
/*
 * Find a pivot value for quicksort partitioning, swap it with a[lo], and save
 * the cached pivot record for the next step.
 *
 * Load evenly-spaced records within the given range into memory, sort them,
 * and choose the pivot from the median record.  Using multiple points will
 * improve the quality of the pivot selection, and hopefully avoid the worst
 * quicksort behavior, since our array values are nearly always evenly sorted.
 */
static int
xfarray_qsort_pivot(
	struct xfarray_sortinfo	*si,
	xfarray_idx_t		lo,
	xfarray_idx_t		hi)
{
	void			*pivot = xfarray_sortinfo_pivot(si);
	void			*parray = xfarray_sortinfo_pivot_array(si);
	void			*recp;
	xfarray_idx_t		*idxp;
	xfarray_idx_t		step = (hi - lo) / (XFARRAY_QSORT_PIVOT_NR - 1);
	size_t			pivot_rec_sz = xfarray_pivot_rec_sz(si->array);
	int			i, j;
	int			error;

	/*
	 * Load the xfarray indexes of the records we intend to sample into the
	 * pivot array.
	 */
	idxp = xfarray_pivot_array_idx(parray, pivot_rec_sz, 0);
	*idxp = lo;
	for (i = 1; i < XFARRAY_QSORT_PIVOT_NR - 1; i++) {
		idxp = xfarray_pivot_array_idx(parray, pivot_rec_sz, i);
		*idxp = lo + (i * step);
	}
	idxp = xfarray_pivot_array_idx(parray, pivot_rec_sz,
			XFARRAY_QSORT_PIVOT_NR - 1);
	*idxp = hi;

	/* Load the selected xfarray records into the pivot array. */
	for (i = 0; i < XFARRAY_QSORT_PIVOT_NR; i++) {
		xfarray_idx_t	idx;

		recp = xfarray_pivot_array_rec(parray, pivot_rec_sz, i);
		idxp = xfarray_pivot_array_idx(parray, pivot_rec_sz, i);

		/* No unset records; load directly into the array. */
		if (likely(si->array->unset_slots == 0)) {
			error = xfarray_sort_load(si, *idxp, recp);
			if (error)
				return error;
			continue;
		}

		/*
		 * Load non-null records into the scratchpad without changing
		 * the xfarray_idx_t in the pivot array.
		 */
		idx = *idxp;
		xfarray_sort_bump_loads(si);
		error = xfarray_load_next(si->array, &idx, recp);
		if (error)
			return error;
	}

	xfarray_sort_bump_heapsorts(si);
	sort(parray, XFARRAY_QSORT_PIVOT_NR, pivot_rec_sz, si->cmp_fn, NULL);

	/*
	 * We sorted the pivot array records (which includes the xfarray
	 * indices) in xfarray record order.  The median element of the pivot
	 * array contains the xfarray record that we will use as the pivot.
	 * Copy that xfarray record to the designated space.
	 */
	recp = xfarray_pivot_array_rec(parray, pivot_rec_sz,
			XFARRAY_QSORT_PIVOT_NR / 2);
	memcpy(pivot, recp, si->array->obj_size);

	/* If the pivot record we chose was already in a[lo] then we're done. */
	idxp = xfarray_pivot_array_idx(parray, pivot_rec_sz,
			XFARRAY_QSORT_PIVOT_NR / 2);
	if (*idxp == lo)
		return 0;

	/*
	 * Find the cached copy of a[lo] in the pivot array so that we can swap
	 * a[lo] and a[pivot].
	 */
	for (i = 0, j = -1; i < XFARRAY_QSORT_PIVOT_NR; i++) {
		idxp = xfarray_pivot_array_idx(parray, pivot_rec_sz, i);
		if (*idxp == lo)
			j = i;
	}
	if (j < 0) {
		ASSERT(j >= 0);
		return -EFSCORRUPTED;
	}

	/* Swap a[lo] and a[pivot]. */
	error = xfarray_sort_store(si, lo, pivot);
	if (error)
		return error;

	recp = xfarray_pivot_array_rec(parray, pivot_rec_sz, j);
	idxp = xfarray_pivot_array_idx(parray, pivot_rec_sz,
			XFARRAY_QSORT_PIVOT_NR / 2);
	return xfarray_sort_store(si, *idxp, recp);
}
/*
 * Set up the pointers for the next iteration.  We push onto the stack all of
 * the unsorted values between a[lo + 1] and a[end[i]], and we tweak the
 * current stack frame to point to the unsorted values between a[beg[i]] and
 * a[lo] so that those values will be sorted when we pop the stack.
 */
static inline int
xfarray_qsort_push(
	struct xfarray_sortinfo	*si,
	xfarray_idx_t		*si_lo,
	xfarray_idx_t		*si_hi,
	xfarray_idx_t		lo,
	xfarray_idx_t		hi)
{
	/* Check for stack overflows */
	if (si->stack_depth >= si->max_stack_depth - 1) {
		ASSERT(si->stack_depth < si->max_stack_depth - 1);
		return -EFSCORRUPTED;
	}

	si->max_stack_used = max_t(uint8_t, si->max_stack_used,
					    si->stack_depth + 2);

	si_lo[si->stack_depth + 1] = lo + 1;
	si_hi[si->stack_depth + 1] = si_hi[si->stack_depth];
	si_hi[si->stack_depth++] = lo - 1;

	/*
	 * Always start with the smaller of the two partitions to keep the
	 * amount of recursion in check.
	 */
	if (si_hi[si->stack_depth]     - si_lo[si->stack_depth] >
	    si_hi[si->stack_depth - 1] - si_lo[si->stack_depth - 1]) {
		swap(si_lo[si->stack_depth], si_lo[si->stack_depth - 1]);
		swap(si_hi[si->stack_depth], si_hi[si->stack_depth - 1]);
	}

	return 0;
}
/*
 * Load an element from the array into the first scratchpad and cache the page,
 * if possible.
 */
static inline int
xfarray_sort_load_cached(
	struct xfarray_sortinfo	*si,
	xfarray_idx_t		idx,
	void			*ptr)
{
	loff_t			idx_pos = xfarray_pos(si->array, idx);
	pgoff_t			startpage;
	pgoff_t			endpage;
	int			error = 0;

	/*
	 * If this load would split a page, release the cached page, if any,
	 * and perform a traditional read.
	 */
	startpage = idx_pos >> PAGE_SHIFT;
	endpage = (idx_pos + si->array->obj_size - 1) >> PAGE_SHIFT;
	if (startpage != endpage) {
		error = xfarray_sort_put_page(si);
		if (error)
			return error;

		if (xfarray_sort_terminated(si, &error))
			return error;

		return xfile_obj_load(si->array->xfile, ptr,
				si->array->obj_size, idx_pos);
	}

	/* If the cached page is not the one we want, release it. */
	if (xfile_page_cached(&si->xfpage) &&
	    xfile_page_index(&si->xfpage) != startpage) {
		error = xfarray_sort_put_page(si);
		if (error)
			return error;
	}

	/*
	 * If we don't have a cached page (and we know the load is contained
	 * in a single page) then grab it.
	 */
	if (!xfile_page_cached(&si->xfpage)) {
		if (xfarray_sort_terminated(si, &error))
			return error;

		error = xfarray_sort_get_page(si, startpage << PAGE_SHIFT,
				PAGE_SIZE);
		if (error)
			return error;
	}

	memcpy(ptr, si->page_kaddr + offset_in_page(idx_pos),
			si->array->obj_size);
	return 0;
}
/*
 * Sort the array elements via quicksort.  This implementation incorporates
 * four optimizations discussed in Sedgewick:
 *
 * 1. Use an explicit stack of array indices to store the next array partition
 *    to sort.  This helps us to avoid recursion in the call stack, which is
 *    particularly expensive in the kernel.
 *
 * 2. For arrays with records in arbitrary or user-controlled order, choose the
 *    pivot element using a median-of-nine decision tree.  This reduces the
 *    probability of selecting a bad pivot value which causes worst case
 *    behavior (i.e. partition sizes of 1).
 *
 * 3. The smaller of the two sub-partitions is pushed onto the stack to start
 *    the next level of recursion, and the larger sub-partition replaces the
 *    current stack frame.  This guarantees that we won't need more than
 *    log2(nr) stack space.
 *
 * 4. For small sets, load the records into the scratchpad and run heapsort on
 *    them because that is very fast.  In the author's experience, this yields
 *    a ~10% reduction in runtime.
 *
 *    If a small set is contained entirely within a single xfile memory page,
 *    map the page directly and run heap sort directly on the xfile page
 *    instead of using the load/store interface.  This halves the runtime.
 *
 * 5. This optimization is specific to the implementation.  When converging lo
 *    and hi after selecting a pivot, we will try to retain the xfile memory
 *    page between load calls, which reduces run time by 50%.
 */

/*
 * Due to the use of signed indices, we can only support up to 2^63 records.
 * Files can only grow to 2^63 bytes, so this is not much of a limitation.
 */
#define QSORT_MAX_RECS		(1ULL << 63)
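/*
 * Hedged usage sketch; not part of the original file.  It wires a
 * caller-supplied comparison function into the sort.  "my_cmp" and
 * "my_sort_demo" are hypothetical and reuse struct my_rec from the first
 * sketch; XFARRAY_SORT_KILLABLE lets a fatal signal interrupt the sort, as
 * implemented in xfarray_sort_terminated above.
 */
static int my_cmp(const void *a, const void *b)
{
	const struct my_rec	*ra = a;
	const struct my_rec	*rb = b;

	if (ra->key < rb->key)
		return -1;
	if (ra->key > rb->key)
		return 1;
	return 0;
}

static inline int my_sort_demo(struct xfarray *array)
{
	return xfarray_sort(array, my_cmp, XFARRAY_SORT_KILLABLE);
}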
int
xfarray_sort(
	struct xfarray		*array,
	xfarray_cmp_fn		cmp_fn,
	unsigned int		flags)
{
	struct xfarray_sortinfo	*si;
	xfarray_idx_t		*si_lo, *si_hi;
	void			*pivot;
	void			*scratch = xfarray_scratch(array);
	xfarray_idx_t		lo, hi;
	int			error = 0;

	if (array->nr < 2)
		return 0;
	if (array->nr >= QSORT_MAX_RECS)
		return -E2BIG;

	error = xfarray_sortinfo_alloc(array, cmp_fn, flags, &si);
	if (error)
		return error;
	si_lo = xfarray_sortinfo_lo(si);
	si_hi = xfarray_sortinfo_hi(si);
	pivot = xfarray_sortinfo_pivot(si);

	while (si->stack_depth >= 0) {
		lo = si_lo[si->stack_depth];
		hi = si_hi[si->stack_depth];

		trace_xfarray_qsort(si, lo, hi);

		/* Nothing left in this partition to sort; pop stack. */
		if (lo >= hi) {
			si->stack_depth--;
			continue;
		}

		/*
		 * If directly mapping the page and sorting can solve our
		 * problems, we're done.
		 */
		if (xfarray_want_pagesort(si, lo, hi)) {
			error = xfarray_pagesort(si, lo, hi);
			if (error)
				goto out_free;
			si->stack_depth--;
			continue;
		}

		/* If insertion sort can solve our problems, we're done. */
		if (xfarray_want_isort(si, lo, hi)) {
			error = xfarray_isort(si, lo, hi);
			if (error)
				goto out_free;
			si->stack_depth--;
			continue;
		}

		/* Pick a pivot, move it to a[lo] and stash it. */
		error = xfarray_qsort_pivot(si, lo, hi);
		if (error)
			goto out_free;

		/*
		 * Rearrange a[lo..hi] such that everything smaller than the
		 * pivot is on the left side of the range and everything larger
		 * than the pivot is on the right side of the range.
		 */
		while (lo < hi) {
			/*
			 * Decrement hi until it finds an a[hi] less than the
			 * pivot value.
			 */
			error = xfarray_sort_load_cached(si, hi, scratch);
			if (error)
				goto out_free;
			while (xfarray_sort_cmp(si, scratch, pivot) >= 0 &&
								lo < hi) {
				hi--;
				error = xfarray_sort_load_cached(si, hi,
						scratch);
				if (error)
					goto out_free;
			}
			error = xfarray_sort_put_page(si);
			if (error)
				goto out_free;

			if (xfarray_sort_terminated(si, &error))
				goto out_free;

			/* Copy that item (a[hi]) to a[lo]. */
			if (lo < hi) {
				error = xfarray_sort_store(si, lo++, scratch);
				if (error)
					goto out_free;
			}

			/*
			 * Increment lo until it finds an a[lo] greater than
			 * the pivot value.
			 */
			error = xfarray_sort_load_cached(si, lo, scratch);
			if (error)
				goto out_free;
			while (xfarray_sort_cmp(si, scratch, pivot) <= 0 &&
								lo < hi) {
				lo++;
				error = xfarray_sort_load_cached(si, lo,
						scratch);
				if (error)
					goto out_free;
			}
			error = xfarray_sort_put_page(si);
			if (error)
				goto out_free;

			if (xfarray_sort_terminated(si, &error))
				goto out_free;

			/* Copy that item (a[lo]) to a[hi]. */
			if (lo < hi) {
				error = xfarray_sort_store(si, hi--, scratch);
				if (error)
					goto out_free;
			}

			if (xfarray_sort_terminated(si, &error))
				goto out_free;
		}

		/*
		 * Put our pivot value in the correct place at a[lo].  All
		 * values between a[beg[i]] and a[lo - 1] should be less than
		 * the pivot; and all values between a[lo + 1] and a[end[i]-1]
		 * should be greater than the pivot.
		 */
		error = xfarray_sort_store(si, lo, pivot);
		if (error)
			goto out_free;

		/* Set up the stack frame to process the two partitions. */
		error = xfarray_qsort_push(si, si_lo, si_hi, lo, hi);
		if (error)
			goto out_free;

		if (xfarray_sort_terminated(si, &error))
			goto out_free;
	}

out_free:
	trace_xfarray_sort_stats(si, error);
	kvfree(si);
	return error;
}