// SPDX-License-Identifier: GPL-2.0
/*
 * The main purpose of the tests here is to exercise the migration entry code
 * paths in the kernel.
 */

#include "../kselftest_harness.h"
#include <stdint.h>
#include <string.h>
#include <pthread.h>
#include <numa.h>
#include <numaif.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <signal.h>
#include <time.h>

#define TWOMEG (2<<20)
/* Seconds each test spends migrating pages back and forth. */
#define RUNTIME (60)

#define ALIGN(x, a) (((x) + (a - 1)) & (~((a) - 1)))

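/* Per-test state: worker threads/processes and the two NUMA nodes to migrate between. */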
FIXTURE(migration)
{
        pthread_t *threads;
        pid_t *pids;
        int nthreads;
        int n1;
        int n2;
};

FIXTURE_SETUP(migration)
{
        int n;

        ASSERT_EQ(numa_available(), 0);
        self->nthreads = numa_num_task_cpus() - 1;
        self->n1 = -1;
        self->n2 = -1;

        /* Pick the first two NUMA nodes present as migration source/target. */
        for (n = 0; n < numa_max_possible_node(); n++)
                if (numa_bitmask_isbitset(numa_all_nodes_ptr, n)) {
                        if (self->n1 == -1) {
                                self->n1 = n;
                        } else {
                                self->n2 = n;
                                break;
                        }
                }

        self->threads = malloc(self->nthreads * sizeof(*self->threads));
        ASSERT_NE(self->threads, NULL);
        self->pids = malloc(self->nthreads * sizeof(*self->pids));
        ASSERT_NE(self->pids, NULL);
};

FIXTURE_TEARDOWN(migration)
{
        free(self->threads);
        free(self->pids);
}

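/*
 * Migrate a page back and forth between the two nodes for RUNTIME seconds,
 * returning 0 on success and a negative value on error.
 */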
int migrate(uint64_t *ptr, int n1, int n2)
{
        int ret, tmp;
        int status = 0;
        struct timespec ts1, ts2;

        if (clock_gettime(CLOCK_MONOTONIC, &ts1))
                return -1;

        while (1) {
                if (clock_gettime(CLOCK_MONOTONIC, &ts2))
                        return -1;

                if (ts2.tv_sec - ts1.tv_sec >= RUNTIME)
                        return 0;

                ret = move_pages(0, 1, (void **) &ptr, &n2, &status,
                                MPOL_MF_MOVE_ALL);
                if (ret) {
                        if (ret > 0)
                                printf("Didn't migrate %d pages\n", ret);
                        else
                                perror("Couldn't migrate pages");
                        return -2;
                }

                /* Swap nodes so the page keeps bouncing back and forth. */
                tmp = n2;
                n2 = n1;
                n1 = tmp;
        }

        return 0;
}

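/*
 * Worker loop: repeatedly read the test memory so that accesses race with
 * migration and hit the migration entry wait path, until cancelled.
 */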
void *access_mem(void *ptr)
{
        uint64_t y = 0;
        volatile uint64_t *x = ptr;

        while (1) {
                pthread_testcancel();
                y += *x;
        }

        return NULL;
}

/*
 * Basic migration entry testing. One thread will move pages back and forth
 * between nodes whilst other threads try and access them triggering the
 * migration entry wait paths in the kernel.
 */
TEST_F_TIMEOUT(migration, private_anon, 2*RUNTIME)
{
        uint64_t *ptr;
        int i;

        if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
                SKIP(return, "Not enough threads or NUMA nodes available");

        ptr = mmap(NULL, TWOMEG, PROT_READ | PROT_WRITE,
                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        ASSERT_NE(ptr, MAP_FAILED);

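        /* Fault the pages in before the readers and the migration loop start. */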
        memset(ptr, 0xde, TWOMEG);
        for (i = 0; i < self->nthreads - 1; i++)
                if (pthread_create(&self->threads[i], NULL, access_mem, ptr))
                        perror("Couldn't create thread");

        ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0);
        for (i = 0; i < self->nthreads - 1; i++)
                ASSERT_EQ(pthread_cancel(self->threads[i]), 0);
}

/*
 * Same as the previous test but with shared memory.
 */
TEST_F_TIMEOUT(migration, shared_anon, 2*RUNTIME)
{
        pid_t pid;
        uint64_t *ptr;
        int i;

        if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
                SKIP(return, "Not enough threads or NUMA nodes available");

        ptr = mmap(NULL, TWOMEG, PROT_READ | PROT_WRITE,
                MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        ASSERT_NE(ptr, MAP_FAILED);

        memset(ptr, 0xde, TWOMEG);
        /* Fork children that hammer the shared mapping while it migrates. */
        for (i = 0; i < self->nthreads - 1; i++) {
                pid = fork();
                if (!pid)
                        access_mem(ptr);
                else
                        self->pids[i] = pid;
        }

        ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0);
        for (i = 0; i < self->nthreads - 1; i++)
                ASSERT_EQ(kill(self->pids[i], SIGTERM), 0);
}

/*
 * Tests the pmd migration entry paths.
 */
TEST_F_TIMEOUT(migration, private_anon_thp, 2*RUNTIME)
{
        uint64_t *ptr;
        int i;

        if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
                SKIP(return, "Not enough threads or NUMA nodes available");

        ptr = mmap(NULL, 2*TWOMEG, PROT_READ | PROT_WRITE,
                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        ASSERT_NE(ptr, MAP_FAILED);

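        /* Align to a 2MB boundary so MADV_HUGEPAGE can install a PMD-mapped THP. */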
        ptr = (uint64_t *) ALIGN((uintptr_t) ptr, TWOMEG);
        ASSERT_EQ(madvise(ptr, TWOMEG, MADV_HUGEPAGE), 0);
        memset(ptr, 0xde, TWOMEG);
        for (i = 0; i < self->nthreads - 1; i++)
                if (pthread_create(&self->threads[i], NULL, access_mem, ptr))
                        perror("Couldn't create thread");

        ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0);
        for (i = 0; i < self->nthreads - 1; i++)
                ASSERT_EQ(pthread_cancel(self->threads[i]), 0);
}

TEST_HARNESS_MAIN