// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2020 Google LLC
 */
#define _GNU_SOURCE
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <sys/mman.h>

#include "../kselftest.h"
#define EXPECT_SUCCESS 0
#define EXPECT_FAILURE 1
#define NON_OVERLAPPING 0
#define OVERLAPPING 1
#define NS_PER_SEC 1000000000ULL
#define VALIDATION_DEFAULT_THRESHOLD 4	/* 4MB */
#define VALIDATION_NO_THRESHOLD 0	/* Verify the entire region */

#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
#define MAX(X, Y) ((X) > (Y) ? (X) : (Y))

#define SIZE_MB(m) ((size_t)m * (1024 * 1024))
#define SIZE_KB(k) ((size_t)k * 1024)
struct config {
	unsigned long long src_alignment;
	unsigned long long dest_alignment;
	unsigned long long region_size;
	int overlapping;
	int dest_preamble_size;
};

struct test {
	char *name;
	struct config config;
	int expect_failure;
};

enum {
	_1KB = 1ULL << 10,	/* 1KB -> not page aligned */
	_4KB = 4ULL << 10,
	_1MB = 1ULL << 20,
	_2MB = 2ULL << 20,
	_4MB = 4ULL << 20,
	_5MB = 5ULL << 20,
	_1GB = 1ULL << 30,
	_2GB = 2ULL << 30,
	PTE = _4KB,
	PMD = _2MB,
	PUD = _1GB,
};
#define MAKE_TEST(source_align, destination_align, size,	\
		  overlaps, should_fail, test_name)		\
(struct test){							\
	.name = test_name,					\
	.config = {						\
		.src_alignment = source_align,			\
		.dest_alignment = destination_align,		\
		.region_size = size,				\
		.overlapping = overlaps,			\
	},							\
	.expect_failure = should_fail				\
}
/* compute square root using binary search */
static unsigned long get_sqrt(unsigned long val)
{
	unsigned long low = 1;

	/* assuming rand_size is less than 1TB */
	unsigned long high = (1UL << 20);
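	/*
	 * The upper bound holds because sqrt(1TB) = sqrt(1ULL << 40) =
	 * 1UL << 20, so any value below 1TB has its integer square root
	 * within [1, 1UL << 20].
	 */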
	while (low <= high) {
		unsigned long mid = low + (high - low) / 2;
		unsigned long temp = mid * mid;

		if (temp == val)
			return mid;
		if (temp > val)
			high = mid - 1;
		else
			low = mid + 1;
	}

	return high;
}
/*
 * Returns false if the requested remap region overlaps with an
 * existing mapping (e.g. text, stack) else returns true.
 */
static bool is_remap_region_valid(void *addr, unsigned long long size)
{
	void *remap_addr = NULL;
	bool ret = true;

	/* Use MAP_FIXED_NOREPLACE flag to ensure region is not mapped */
	remap_addr = mmap(addr, size, PROT_READ | PROT_WRITE,
			  MAP_FIXED_NOREPLACE | MAP_ANONYMOUS | MAP_SHARED,
			  -1, 0);

	if (remap_addr == MAP_FAILED) {
		if (errno == EEXIST)
			ret = false;
	} else {
		munmap(remap_addr, size);
	}

	return ret;
}
/* Returns mmap_min_addr sysctl tunable from procfs */
static unsigned long long get_mmap_min_addr(void)
{
	FILE *fp;
	int n_matched;
	static unsigned long long addr;

	if (addr)
		return addr;

	fp = fopen("/proc/sys/vm/mmap_min_addr", "r");
	if (fp == NULL) {
		ksft_print_msg("Failed to open /proc/sys/vm/mmap_min_addr: %s\n",
			       strerror(errno));
		exit(KSFT_SKIP);
	}

	n_matched = fscanf(fp, "%llu", &addr);
	if (n_matched != 1) {
		ksft_print_msg("Failed to read /proc/sys/vm/mmap_min_addr: %s\n",
			       strerror(errno));
		fclose(fp);
		exit(KSFT_SKIP);
	}

	fclose(fp);
	return addr;
}
/*
 * Using /proc/self/maps, assert that the specified address range is contained
 * within a single mapping.
 */
static bool is_range_mapped(FILE *maps_fp, unsigned long start,
			    unsigned long end)
{
	char *line = NULL;
	size_t len = 0;
	bool success = false;
	unsigned long first_val, second_val;

	while (getline(&line, &len, maps_fp) != -1) {
		if (sscanf(line, "%lx-%lx", &first_val, &second_val) != 2) {
			ksft_exit_fail_msg("cannot parse /proc/self/maps\n");
			break;
		}

		if (first_val <= start && second_val >= end) {
			success = true;
			break;
		}
	}

	free(line);
	return success;
}
/*
 * Returns the start address of the mapping on success, else returns
 * NULL on failure.
 */
static void *get_source_mapping(struct config c)
{
	unsigned long long addr = 0ULL;
	void *src_addr = NULL;
	unsigned long long mmap_min_addr;

	mmap_min_addr = get_mmap_min_addr();
	/*
	 * For some tests, we need to not have any mappings below the
	 * source mapping. Add some headroom to mmap_min_addr for this.
	 */
	mmap_min_addr += 10 * _4MB;

retry:
	addr += c.src_alignment;
	if (addr < mmap_min_addr)
		goto retry;

	src_addr = mmap((void *) addr, c.region_size, PROT_READ | PROT_WRITE,
			MAP_FIXED_NOREPLACE | MAP_ANONYMOUS | MAP_SHARED,
			-1, 0);
	if (src_addr == MAP_FAILED) {
		if (errno == EPERM || errno == EEXIST)
			goto retry;
		goto error;
	}
	/*
	 * Check that the address is aligned to the specified alignment.
	 * Addresses aligned to a larger power of two than the one requested
	 * (i.e. with the requested alignment bit itself clear) are not
	 * considered valid. For instance, a 1GB address is 2MB-aligned, but
	 * it will not be considered valid for a requested alignment of 2MB.
	 * This is done to reduce coincidental alignment in the tests.
	 */
	if (((unsigned long long) src_addr & (c.src_alignment - 1)) ||
	    !((unsigned long long) src_addr & c.src_alignment)) {
		munmap(src_addr, c.region_size);
		src_addr = NULL;
		goto retry;
	}

	return src_addr;

error:
	ksft_print_msg("Failed to map source region: %s\n",
		       strerror(errno));
	return NULL;
}
/*
 * This test validates that merge is called when expanding a mapping.
 * A mapping containing three pages is created, the middle page is
 * unmapped, and then the mapping containing the first page is expanded
 * so that it fills the created hole. The two parts should merge,
 * creating a single mapping with three pages.
 */
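/*
 * Rough sketch of the expected layout (pages labelled A, B, C for
 * illustration only):
 *
 *	mmap 3 pages:		|A|B|C|
 *	munmap middle page:	|A| |C|
 *	mremap A to 2 pages:	|A|A'|C|  -> should end up as one 3-page VMA
 */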
static void mremap_expand_merge(FILE *maps_fp, unsigned long page_size)
{
	char *test_name = "mremap expand merge";
	bool success = false;
	char *remap, *start;

	start = mmap(NULL, 3 * page_size, PROT_READ | PROT_WRITE,
		     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (start == MAP_FAILED) {
		ksft_print_msg("mmap failed: %s\n", strerror(errno));
		goto out;
	}

	munmap(start + page_size, page_size);
	remap = mremap(start, page_size, 2 * page_size, 0);
	if (remap == MAP_FAILED) {
		ksft_print_msg("mremap failed: %s\n", strerror(errno));
		munmap(start, page_size);
		munmap(start + 2 * page_size, page_size);
		goto out;
	}

	success = is_range_mapped(maps_fp, (unsigned long)start,
				  (unsigned long)(start + 3 * page_size));
	munmap(start, 3 * page_size);

out:
	if (success)
		ksft_test_result_pass("%s\n", test_name);
	else
		ksft_test_result_fail("%s\n", test_name);
}
/*
 * Similar to mremap_expand_merge() except instead of removing the middle page,
 * we remove the last page, then attempt to remap offset from the second page.
 * This should result in the mapping being restored to its former state.
 */
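/*
 * Rough sketch (pages labelled A, B, C for illustration only):
 *
 *	mmap 3 pages:		|A|B|C|
 *	munmap last page:	|A|B| |
 *	mremap B to 2 pages:	|A|B|B'|  -> should again be one 3-page VMA
 */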
static void mremap_expand_merge_offset(FILE *maps_fp, unsigned long page_size)
{
	char *test_name = "mremap expand merge offset";
	bool success = false;
	char *remap, *start;

	start = mmap(NULL, 3 * page_size, PROT_READ | PROT_WRITE,
		     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (start == MAP_FAILED) {
		ksft_print_msg("mmap failed: %s\n", strerror(errno));
		goto out;
	}

	/* Unmap final page to ensure we have space to expand. */
	munmap(start + 2 * page_size, page_size);
	remap = mremap(start + page_size, page_size, 2 * page_size, 0);
	if (remap == MAP_FAILED) {
		ksft_print_msg("mremap failed: %s\n", strerror(errno));
		munmap(start, 2 * page_size);
		goto out;
	}

	success = is_range_mapped(maps_fp, (unsigned long)start,
				  (unsigned long)(start + 3 * page_size));
	munmap(start, 3 * page_size);

out:
	if (success)
		ksft_test_result_pass("%s\n", test_name);
	else
		ksft_test_result_fail("%s\n", test_name);
}
/*
 * Verify that an mremap within a range does not cause corruption
 * of unrelated part of range.
 *
 * Consider the following range which is 2MB aligned and is
 * a part of a larger 20MB range which is not shown. Each
 * character below represents 256KB, making the source and destination
 * 2MB each. The lower case letters are moved (s to d) and the
 * upper case letters are not moved. The below test verifies
 * that the upper case S letters are not corrupted by the
 * mremap.
 *
 *   |DDDDddddSSSSssss|
 */
static void mremap_move_within_range(unsigned int pattern_seed, char *rand_addr)
{
	char *test_name = "mremap move within range";
	void *src, *dest;
	int i, success = 1;

	size_t size = SIZE_MB(20);
	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (ptr == MAP_FAILED) {
		perror("mmap");
		success = 0;
		goto out;
	}
	memset(ptr, 0, size);

	src = ptr + SIZE_MB(6);
	src = (void *)((unsigned long)src & ~(SIZE_MB(2) - 1));

	/* Set byte pattern for source block. */
	memcpy(src, rand_addr, SIZE_MB(2));

	dest = src - SIZE_MB(2);

	void *new_ptr = mremap(src + SIZE_MB(1), SIZE_MB(1), SIZE_MB(1),
			       MREMAP_MAYMOVE | MREMAP_FIXED, dest + SIZE_MB(1));
	if (new_ptr == MAP_FAILED) {
		perror("mremap");
		success = 0;
		goto out;
	}

	/* Verify byte pattern after remapping */
	srand(pattern_seed);
	for (i = 0; i < SIZE_MB(1); i++) {
		char c = (char) rand();

		if (((char *)src)[i] != c) {
			ksft_print_msg("Data at src at %d got corrupted due to unrelated mremap\n",
				       i);
			ksft_print_msg("Expected: %#x\t Got: %#x\n", c & 0xff,
				       ((char *) src)[i] & 0xff);
			success = 0;
			goto out;
		}
	}

out:
	if (munmap(ptr, size) == -1)
		perror("munmap");

	if (success)
		ksft_test_result_pass("%s\n", test_name);
	else
		ksft_test_result_fail("%s\n", test_name);
}
/* Returns the time taken for the remap on success else returns -1. */
static long long remap_region(struct config c, unsigned int threshold_mb,
			      char *rand_addr)
{
	void *addr, *src_addr, *dest_addr, *dest_preamble_addr;
	unsigned long long t, d;
	struct timespec t_start = {0, 0}, t_end = {0, 0};
	long long start_ns, end_ns, align_mask, ret, offset;
	unsigned long long threshold;
	unsigned long num_chunks;

	if (threshold_mb == VALIDATION_NO_THRESHOLD)
		threshold = c.region_size;
	else
		threshold = MIN(threshold_mb * _1MB, c.region_size);

	src_addr = get_source_mapping(c);
	if (!src_addr) {
		ret = -1;
		goto out;
	}

	/* Set byte pattern for source block. */
	memcpy(src_addr, rand_addr, threshold);

	/* Mask to zero out lower bits of address for alignment */
	align_mask = ~(c.dest_alignment - 1);
	/* Offset of destination address from the end of the source region */
	offset = (c.overlapping) ? -c.dest_alignment : c.dest_alignment;
	addr = (void *) (((unsigned long long) src_addr + c.region_size
			  + offset) & align_mask);
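	/*
	 * For example, with a non-overlapping test and a 2MB destination
	 * alignment, this picks a 2MB-aligned address just past the end of
	 * the source region (within 2MB of src_addr + region_size); the
	 * checks below may then nudge it further up.
	 */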
	/* Remap after the destination block preamble. */
	addr += c.dest_preamble_size;

	/* See comment in get_source_mapping() */
	if (!((unsigned long long) addr & c.dest_alignment))
		addr = (void *) ((unsigned long long) addr | c.dest_alignment);

	/* Don't destroy existing mappings unless expected to overlap */
	while (!is_remap_region_valid(addr, c.region_size) && !c.overlapping) {
		/* Check for unsigned overflow */
		if (addr + c.dest_alignment < addr) {
			ksft_print_msg("Couldn't find a valid region to remap to\n");
			ret = -1;
			goto clean_up_src;
		}
		addr += c.dest_alignment;
	}

	if (c.dest_preamble_size) {
		dest_preamble_addr = mmap((void *) addr - c.dest_preamble_size, c.dest_preamble_size,
					  PROT_READ | PROT_WRITE,
					  MAP_FIXED_NOREPLACE | MAP_ANONYMOUS | MAP_SHARED,
					  -1, 0);
		if (dest_preamble_addr == MAP_FAILED) {
			ksft_print_msg("Failed to map dest preamble region: %s\n",
				       strerror(errno));
			ret = -1;
			goto clean_up_src;
		}

		/* Set byte pattern for the dest preamble block. */
		memcpy(dest_preamble_addr, rand_addr, c.dest_preamble_size);
	}

	clock_gettime(CLOCK_MONOTONIC, &t_start);
	dest_addr = mremap(src_addr, c.region_size, c.region_size,
			   MREMAP_MAYMOVE|MREMAP_FIXED, (char *) addr);
	clock_gettime(CLOCK_MONOTONIC, &t_end);

	if (dest_addr == MAP_FAILED) {
		ksft_print_msg("mremap failed: %s\n", strerror(errno));
		ret = -1;
		goto clean_up_dest_preamble;
	}

	/*
	 * Verify byte pattern after remapping. Employ an algorithm with a
	 * square root time complexity in threshold: divide the range into
	 * chunks, if memcmp() returns non-zero, only then perform an
	 * iteration in that chunk to find the mismatch index.
	 */
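	/*
	 * For example, with the default 4MB threshold: num_chunks =
	 * get_sqrt(4MB) = 2048 and chunk_size = 4MB / 2048 = 2KB, so the
	 * region is covered by 2048 memcmp() calls and a byte-by-byte scan
	 * only runs inside a chunk that actually mismatches.
	 */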
	num_chunks = get_sqrt(threshold);
	for (unsigned long i = 0; i < num_chunks; ++i) {
		size_t chunk_size = threshold / num_chunks;
		unsigned long shift = i * chunk_size;

		if (!memcmp(dest_addr + shift, rand_addr + shift, chunk_size))
			continue;

		/* brute force iteration only over mismatch segment */
		for (t = shift; t < shift + chunk_size; ++t) {
			if (((char *) dest_addr)[t] != rand_addr[t]) {
				ksft_print_msg("Data after remap doesn't match at offset %llu\n",
					       t);
				ksft_print_msg("Expected: %#x\t Got: %#x\n", rand_addr[t] & 0xff,
					       ((char *) dest_addr)[t] & 0xff);
				ret = -1;
				goto clean_up_dest;
			}
		}
	}

	/*
	 * if threshold is not divisible by num_chunks, then check the
	 * last chunk
	 */
	for (t = num_chunks * (threshold / num_chunks); t < threshold; ++t) {
		if (((char *) dest_addr)[t] != rand_addr[t]) {
			ksft_print_msg("Data after remap doesn't match at offset %llu\n",
				       t);
			ksft_print_msg("Expected: %#x\t Got: %#x\n", rand_addr[t] & 0xff,
				       ((char *) dest_addr)[t] & 0xff);
			ret = -1;
			goto clean_up_dest;
		}
	}

	/* Verify the dest preamble byte pattern after remapping */
	if (!c.dest_preamble_size)
		goto no_preamble;

	num_chunks = get_sqrt(c.dest_preamble_size);

	for (unsigned long i = 0; i < num_chunks; ++i) {
		size_t chunk_size = c.dest_preamble_size / num_chunks;
		unsigned long shift = i * chunk_size;

		if (!memcmp(dest_preamble_addr + shift, rand_addr + shift,
			    chunk_size))
			continue;

		/* brute force iteration only over mismatched segment */
		for (d = shift; d < shift + chunk_size; ++d) {
			if (((char *) dest_preamble_addr)[d] != rand_addr[d]) {
				ksft_print_msg("Preamble data after remap doesn't match at offset %llu\n",
					       d);
				ksft_print_msg("Expected: %#x\t Got: %#x\n", rand_addr[d] & 0xff,
					       ((char *) dest_preamble_addr)[d] & 0xff);
				ret = -1;
				goto clean_up_dest;
			}
		}
	}

	/*
	 * if dest_preamble_size is not divisible by num_chunks, then check
	 * the last chunk
	 */
	for (d = num_chunks * (c.dest_preamble_size / num_chunks); d < c.dest_preamble_size; ++d) {
		if (((char *) dest_preamble_addr)[d] != rand_addr[d]) {
			ksft_print_msg("Preamble data after remap doesn't match at offset %llu\n",
				       d);
			ksft_print_msg("Expected: %#x\t Got: %#x\n", rand_addr[d] & 0xff,
				       ((char *) dest_preamble_addr)[d] & 0xff);
			ret = -1;
			goto clean_up_dest;
		}
	}

no_preamble:
	start_ns = t_start.tv_sec * NS_PER_SEC + t_start.tv_nsec;
	end_ns = t_end.tv_sec * NS_PER_SEC + t_end.tv_nsec;
	ret = end_ns - start_ns;

	/*
	 * Since the destination address is specified using MREMAP_FIXED, subsequent
	 * mremap will unmap any previous mapping at the address range specified by
	 * dest_addr and region_size. This significantly affects the remap time of
	 * subsequent tests. So we clean up mappings after each test.
	 */
clean_up_dest:
	munmap(dest_addr, c.region_size);
clean_up_dest_preamble:
	if (c.dest_preamble_size && dest_preamble_addr)
		munmap(dest_preamble_addr, c.dest_preamble_size);
clean_up_src:
	munmap(src_addr, c.region_size);
out:
	return ret;
}
/*
 * Verify that an mremap aligning down does not destroy
 * the beginning of the mapping just because the aligned-down
 * address lands on a mapping that may not exist.
 */
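/*
 * Layout used below: the source is mapped at a 1MB+256KB alignment, the
 * destination has its first megabyte unmapped, and only the second
 * megabyte of the source is moved into the second megabyte of the
 * destination. The first megabyte of the source must survive intact.
 */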
static void mremap_move_1mb_from_start(unsigned int pattern_seed,
				       char *rand_addr)
{
	char *test_name = "mremap move 1mb from start at 1MB+256KB aligned src";
	void *src = NULL, *dest = NULL;
	int i, success = 1;

	/* Config to reuse get_source_mapping() to do an aligned mmap. */
	struct config c = {
		.src_alignment = SIZE_MB(1) + SIZE_KB(256),
		.region_size = SIZE_MB(6)
	};

	src = get_source_mapping(c);
	if (!src) {
		success = 0;
		goto out;
	}

	c.src_alignment = SIZE_MB(1) + SIZE_KB(256);
	dest = get_source_mapping(c);
	if (!dest) {
		success = 0;
		goto out;
	}

	/* Set byte pattern for source block. */
	memcpy(src, rand_addr, SIZE_MB(2));

	/*
	 * Unmap the beginning of dest so that the aligned address
	 * falls on no mapping.
	 */
	munmap(dest, SIZE_MB(1));

	void *new_ptr = mremap(src + SIZE_MB(1), SIZE_MB(1), SIZE_MB(1),
			       MREMAP_MAYMOVE | MREMAP_FIXED, dest + SIZE_MB(1));
	if (new_ptr == MAP_FAILED) {
		perror("mremap");
		success = 0;
		goto out;
	}

	/* Verify byte pattern after remapping */
	srand(pattern_seed);
	for (i = 0; i < SIZE_MB(1); i++) {
		char c = (char) rand();

		if (((char *)src)[i] != c) {
			ksft_print_msg("Data at src at %d got corrupted due to unrelated mremap\n",
				       i);
			ksft_print_msg("Expected: %#x\t Got: %#x\n", c & 0xff,
				       ((char *) src)[i] & 0xff);
			success = 0;
			goto out;
		}
	}

out:
	if (src && munmap(src, c.region_size) == -1)
		perror("munmap src");

	if (dest && munmap(dest, c.region_size) == -1)
		perror("munmap dest");

	if (success)
		ksft_test_result_pass("%s\n", test_name);
	else
		ksft_test_result_fail("%s\n", test_name);
}
static void run_mremap_test_case(struct test test_case, int *failures,
				 unsigned int threshold_mb,
				 unsigned int pattern_seed, char *rand_addr)
{
	long long remap_time = remap_region(test_case.config, threshold_mb,
					    rand_addr);

	if (remap_time < 0) {
		if (test_case.expect_failure)
			ksft_test_result_xfail("%s\n\tExpected mremap failure\n",
					       test_case.name);
		else {
			ksft_test_result_fail("%s\n", test_case.name);
			*failures += 1;
		}
	} else {
		/*
		 * Comparing mremap time is only applicable if entire region
		 * was mremapped.
		 */
		if (threshold_mb == VALIDATION_NO_THRESHOLD ||
		    test_case.config.region_size <= threshold_mb * _1MB)
			ksft_test_result_pass("%s\n\tmremap time: %12lldns\n",
					      test_case.name, remap_time);
		else
			ksft_test_result_pass("%s\n", test_case.name);
	}
}
static void usage(const char *cmd)
{
	fprintf(stderr,
		"Usage: %s [[-t <threshold_mb>] [-p <pattern_seed>]]\n"
		"-t\t only validate threshold_mb of the remapped region\n"
		" \t if 0 is supplied no threshold is used; all tests\n"
		" \t are run and remapped regions validated fully.\n"
		" \t The default threshold used is 4MB.\n"
		"-p\t provide a seed to generate the random pattern for\n"
		" \t validating the remapped region.\n", cmd);
}
static int parse_args(int argc, char **argv, unsigned int *threshold_mb,
		      unsigned int *pattern_seed)
{
	const char *optstr = "t:p:";
	int opt;

	while ((opt = getopt(argc, argv, optstr)) != -1) {
		switch (opt) {
		case 't':
			*threshold_mb = atoi(optarg);
			break;
		case 'p':
			*pattern_seed = atoi(optarg);
			break;
		default:
			usage(argv[0]);
			return -1;
		}
	}

	return 0;
}
#define MAX_TEST 15
#define MAX_PERF_TEST 3
int main(int argc, char **argv)
{
	int failures = 0;
	int i, run_perf_tests;
	unsigned int threshold_mb = VALIDATION_DEFAULT_THRESHOLD;

	/* hard-coded test configs */
	size_t max_test_variable_region_size = _2GB;
	size_t max_test_constant_region_size = _2MB;
	size_t dest_preamble_size = 10 * _4MB;

	unsigned int pattern_seed;
	int num_expand_tests = 2;
	int num_misc_tests = 2;
	struct test test_cases[MAX_TEST] = {};
	struct test perf_test_cases[MAX_PERF_TEST];
	int page_size;
	time_t t;
	FILE *maps_fp;
	char *rand_addr;
	size_t rand_size;

	pattern_seed = (unsigned int) time(&t);

	if (parse_args(argc, argv, &threshold_mb, &pattern_seed) < 0)
		exit(EXIT_FAILURE);

	ksft_print_msg("Test configs:\n\tthreshold_mb=%u\n\tpattern_seed=%u\n\n",
		       threshold_mb, pattern_seed);

	/*
	 * set preallocated random array according to test configs; see the
	 * functions for the logic of setting the size
	 */
	if (!threshold_mb)
		rand_size = MAX(max_test_variable_region_size,
				max_test_constant_region_size);
	else
		rand_size = MAX(MIN(threshold_mb * _1MB,
				    max_test_variable_region_size),
				max_test_constant_region_size);
	rand_size = MAX(dest_preamble_size, rand_size);
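	/*
	 * With the default config (threshold_mb = 4), for example, this works
	 * out to rand_size = MAX(MIN(4MB, 2GB), 2MB) = 4MB, and then
	 * MAX(40MB, 4MB) = 40MB so the destination preamble is covered too.
	 */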
	rand_addr = (char *)mmap(NULL, rand_size, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (rand_addr == MAP_FAILED) {
		ksft_exit_fail_msg("cannot mmap rand_addr\n");
	}

	/* fill stream of random bytes */
	srand(pattern_seed);
	for (unsigned long i = 0; i < rand_size; ++i)
		rand_addr[i] = (char) rand();

	page_size = sysconf(_SC_PAGESIZE);
	/* Expected mremap failures */
	test_cases[0] = MAKE_TEST(page_size, page_size, page_size,
				  OVERLAPPING, EXPECT_FAILURE,
				  "mremap - Source and Destination Regions Overlapping");

	test_cases[1] = MAKE_TEST(page_size, page_size/4, page_size,
				  NON_OVERLAPPING, EXPECT_FAILURE,
				  "mremap - Destination Address Misaligned (1KB-aligned)");
	test_cases[2] = MAKE_TEST(page_size/4, page_size, page_size,
				  NON_OVERLAPPING, EXPECT_FAILURE,
				  "mremap - Source Address Misaligned (1KB-aligned)");

	/* Src addr PTE aligned */
	test_cases[3] = MAKE_TEST(PTE, PTE, PTE * 2,
				  NON_OVERLAPPING, EXPECT_SUCCESS,
				  "8KB mremap - Source PTE-aligned, Destination PTE-aligned");

	/* Src addr 1MB aligned */
	test_cases[4] = MAKE_TEST(_1MB, PTE, _2MB, NON_OVERLAPPING, EXPECT_SUCCESS,
				  "2MB mremap - Source 1MB-aligned, Destination PTE-aligned");
	test_cases[5] = MAKE_TEST(_1MB, _1MB, _2MB, NON_OVERLAPPING, EXPECT_SUCCESS,
				  "2MB mremap - Source 1MB-aligned, Destination 1MB-aligned");

	/* Src addr PMD aligned */
	test_cases[6] = MAKE_TEST(PMD, PTE, _4MB, NON_OVERLAPPING, EXPECT_SUCCESS,
				  "4MB mremap - Source PMD-aligned, Destination PTE-aligned");
	test_cases[7] = MAKE_TEST(PMD, _1MB, _4MB, NON_OVERLAPPING, EXPECT_SUCCESS,
				  "4MB mremap - Source PMD-aligned, Destination 1MB-aligned");
	test_cases[8] = MAKE_TEST(PMD, PMD, _4MB, NON_OVERLAPPING, EXPECT_SUCCESS,
				  "4MB mremap - Source PMD-aligned, Destination PMD-aligned");

	/* Src addr PUD aligned */
	test_cases[9] = MAKE_TEST(PUD, PTE, _2GB, NON_OVERLAPPING, EXPECT_SUCCESS,
				  "2GB mremap - Source PUD-aligned, Destination PTE-aligned");
	test_cases[10] = MAKE_TEST(PUD, _1MB, _2GB, NON_OVERLAPPING, EXPECT_SUCCESS,
				   "2GB mremap - Source PUD-aligned, Destination 1MB-aligned");
	test_cases[11] = MAKE_TEST(PUD, PMD, _2GB, NON_OVERLAPPING, EXPECT_SUCCESS,
				   "2GB mremap - Source PUD-aligned, Destination PMD-aligned");
	test_cases[12] = MAKE_TEST(PUD, PUD, _2GB, NON_OVERLAPPING, EXPECT_SUCCESS,
				   "2GB mremap - Source PUD-aligned, Destination PUD-aligned");

	/* Src and Dest addr 1MB aligned. 5MB mremap. */
	test_cases[13] = MAKE_TEST(_1MB, _1MB, _5MB, NON_OVERLAPPING, EXPECT_SUCCESS,
				   "5MB mremap - Source 1MB-aligned, Destination 1MB-aligned");
	/* Src and Dest addr 1MB aligned. 5MB mremap with a 40MB dest preamble. */
	test_cases[14] = MAKE_TEST(_1MB, _1MB, _5MB, NON_OVERLAPPING, EXPECT_SUCCESS,
				   "5MB mremap - Source 1MB-aligned, Dest 1MB-aligned with 40MB Preamble");
	test_cases[14].config.dest_preamble_size = 10 * _4MB;
	perf_test_cases[0] = MAKE_TEST(page_size, page_size, _1GB, NON_OVERLAPPING, EXPECT_SUCCESS,
				       "1GB mremap - Source PTE-aligned, Destination PTE-aligned");
	/*
	 * mremap 1GB region - Page table level aligned time
	 * comparison.
	 */
	perf_test_cases[1] = MAKE_TEST(PMD, PMD, _1GB, NON_OVERLAPPING, EXPECT_SUCCESS,
				       "1GB mremap - Source PMD-aligned, Destination PMD-aligned");
	perf_test_cases[2] = MAKE_TEST(PUD, PUD, _1GB, NON_OVERLAPPING, EXPECT_SUCCESS,
				       "1GB mremap - Source PUD-aligned, Destination PUD-aligned");

	run_perf_tests = (threshold_mb == VALIDATION_NO_THRESHOLD) ||
			 (threshold_mb * _1MB >= _1GB);

	ksft_set_plan(ARRAY_SIZE(test_cases) + (run_perf_tests ?
		      ARRAY_SIZE(perf_test_cases) : 0) + num_expand_tests + num_misc_tests);
	for (i = 0; i < ARRAY_SIZE(test_cases); i++)
		run_mremap_test_case(test_cases[i], &failures, threshold_mb,
				     pattern_seed, rand_addr);

	maps_fp = fopen("/proc/self/maps", "r");

	if (maps_fp == NULL) {
		munmap(rand_addr, rand_size);
		ksft_exit_fail_msg("Failed to read /proc/self/maps: %s\n", strerror(errno));
	}

	mremap_expand_merge(maps_fp, page_size);
	mremap_expand_merge_offset(maps_fp, page_size);

	fclose(maps_fp);

	mremap_move_within_range(pattern_seed, rand_addr);
	mremap_move_1mb_from_start(pattern_seed, rand_addr);

	if (run_perf_tests) {
		ksft_print_msg("\n%s\n",
			       "mremap HAVE_MOVE_PMD/PUD optimization time comparison for 1GB region:");
		for (i = 0; i < ARRAY_SIZE(perf_test_cases); i++)
			run_mremap_test_case(perf_test_cases[i], &failures,
					     threshold_mb, pattern_seed,
					     rand_addr);
	}

	munmap(rand_addr, rand_size);

	if (failures > 0)
		ksft_exit_fail();
	else
		ksft_exit_pass();
}