// SPDX-License-Identifier: GPL-2.0
/*
 * The main purpose of the tests here is to exercise the migration entry code
 * paths in the kernel.
 */
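/*
 * The tests need libnuma and at least two NUMA nodes with memory, plus at
 * least three CPUs in the task's cpuset; otherwise each test SKIPs itself
 * (see the checks at the top of every TEST_F_TIMEOUT() below).
 */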
#include "../kselftest_harness.h"
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <pthread.h>
#include <numa.h>
#include <numaif.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <sys/types.h>
#include <signal.h>
#include <time.h>
#define TWOMEG (2<<20)
#define RUNTIME (20)

#define MAX_RETRIES 100
#define ALIGN(x, a) (((x) + (a - 1)) & (~((a) - 1)))
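/*
 * ALIGN() rounds x up to a multiple of a (a power of two),
 * e.g. ALIGN(0x12345, TWOMEG) == 0x200000.
 */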
FIXTURE(migration)
{
        pthread_t *threads;
        pid_t *pids;
        int nthreads;
        int n1;
        int n2;
};

FIXTURE_SETUP(migration)
{
        int n;

        ASSERT_EQ(numa_available(), 0);
        self->nthreads = numa_num_task_cpus() - 1;
        self->n1 = -1;
        self->n2 = -1;

        /* Pick the first two nodes with memory to migrate between. */
        for (n = 0; n < numa_max_possible_node(); n++)
                if (numa_bitmask_isbitset(numa_all_nodes_ptr, n)) {
                        if (self->n1 == -1) {
                                self->n1 = n;
                        } else {
                                self->n2 = n;
                                break;
                        }
                }

        self->threads = malloc(self->nthreads * sizeof(*self->threads));
        ASSERT_NE(self->threads, NULL);
        self->pids = malloc(self->nthreads * sizeof(*self->pids));
        ASSERT_NE(self->pids, NULL);
}
FIXTURE_TEARDOWN(migration)
{
        free(self->threads);
        free(self->pids);
}
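/*
 * Bounce the page backing ptr between nodes n1 and n2 using move_pages(2)
 * for RUNTIME seconds. Best-effort migration failures are retried; only
 * MAX_RETRIES consecutive failures or a hard error end the loop early with
 * a negative return value, while 0 means the full runtime elapsed.
 */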
int migrate(uint64_t *ptr, int n1, int n2)
{
        int ret, tmp;
        int status = 0;
        int failures = 0;
        struct timespec ts1, ts2;

        if (clock_gettime(CLOCK_MONOTONIC, &ts1))
                return -1;

        while (1) {
                if (clock_gettime(CLOCK_MONOTONIC, &ts2))
                        return -1;

                if (ts2.tv_sec - ts1.tv_sec >= RUNTIME)
                        return 0;

                ret = move_pages(0, 1, (void **) &ptr, &n2, &status,
                                MPOL_MF_MOVE_ALL);
                if (ret) {
                        if (ret > 0) {
                                /* Migration is best effort; try again */
                                if (++failures < MAX_RETRIES)
                                        continue;
                                printf("Didn't migrate %d pages\n", ret);
                        } else {
                                perror("Couldn't migrate pages");
                        }
                        return -2;
                }
                failures = 0;

                /* Swap the nodes so the next pass migrates back again. */
                tmp = n2;
                n2 = n1;
                n1 = tmp;
        }

        return 0;
}
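/*
 * Worker loop for the accessor threads/processes started by the tests
 * below: keep reading the mapping so the accesses race with the concurrent
 * migrations and exercise the migration entry wait paths in the kernel.
 */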
void *access_mem(void *ptr)
{
        volatile uint64_t y = 0;
        volatile uint64_t *x = ptr;

        while (1) {
                pthread_testcancel();
                y += *x;

                /* Prevent the compiler from optimizing out the writes to y: */
                asm volatile("" : "+r" (y));
        }

        return NULL;
}
/*
 * Basic migration entry testing. One thread will move pages back and forth
 * between nodes whilst other threads try and access them triggering the
 * migration entry wait paths in the kernel.
 */
TEST_F_TIMEOUT(migration, private_anon, 2*RUNTIME)
{
        uint64_t *ptr;
        int i;

        if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
                SKIP(return, "Not enough threads or NUMA nodes available");

        ptr = mmap(NULL, TWOMEG, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        ASSERT_NE(ptr, MAP_FAILED);

        memset(ptr, 0xde, TWOMEG);
        for (i = 0; i < self->nthreads - 1; i++)
                if (pthread_create(&self->threads[i], NULL, access_mem, ptr))
                        perror("Couldn't create thread");

        ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0);
        for (i = 0; i < self->nthreads - 1; i++)
                ASSERT_EQ(pthread_cancel(self->threads[i]), 0);
}
/*
 * Same as the previous test but with shared memory.
 */
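/*
 * Here the accessors are forked child processes sharing the MAP_SHARED
 * mapping rather than pthreads in a single address space.
 */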
TEST_F_TIMEOUT(migration, shared_anon, 2*RUNTIME)
{
        pid_t pid;
        uint64_t *ptr;
        int i;

        if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
                SKIP(return, "Not enough threads or NUMA nodes available");

        ptr = mmap(NULL, TWOMEG, PROT_READ | PROT_WRITE,
                   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        ASSERT_NE(ptr, MAP_FAILED);

        memset(ptr, 0xde, TWOMEG);
        for (i = 0; i < self->nthreads - 1; i++) {
                pid = fork();
                if (!pid) {
                        prctl(PR_SET_PDEATHSIG, SIGHUP);
                        /* Parent may have died before prctl so check now. */
                        if (getppid() == 1)
                                kill(getpid(), SIGHUP);
                        access_mem(ptr);
                } else {
                        self->pids[i] = pid;
                }
        }

        ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0);
        for (i = 0; i < self->nthreads - 1; i++)
                ASSERT_EQ(kill(self->pids[i], SIGTERM), 0);
}
/*
 * Tests the pmd migration entry paths.
 */
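/*
 * A 2MB-aligned region is marked MADV_HUGEPAGE so that, with THP enabled,
 * it is backed by a transparent huge page and the migrations here go
 * through pmd (rather than pte) migration entries.
 */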
TEST_F_TIMEOUT(migration, private_anon_thp, 2*RUNTIME)
{
        uint64_t *ptr;
        int i;

        if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
                SKIP(return, "Not enough threads or NUMA nodes available");

        ptr = mmap(NULL, 2*TWOMEG, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        ASSERT_NE(ptr, MAP_FAILED);

        /* Use a 2MB-aligned chunk of the over-sized mapping. */
        ptr = (uint64_t *) ALIGN((uintptr_t) ptr, TWOMEG);
        ASSERT_EQ(madvise(ptr, TWOMEG, MADV_HUGEPAGE), 0);
        memset(ptr, 0xde, TWOMEG);
        for (i = 0; i < self->nthreads - 1; i++)
                if (pthread_create(&self->threads[i], NULL, access_mem, ptr))
                        perror("Couldn't create thread");

        ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0);
        for (i = 0; i < self->nthreads - 1; i++)
                ASSERT_EQ(pthread_cancel(self->threads[i]), 0);
}

TEST_HARNESS_MAIN