author     Andrea Arcangeli <aarcange@redhat.com>          2015-09-04 18:47:23 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-09-04 19:54:41 -0400
commit     c47174fc362a089b1125174258e53ef4a69ce6b8
tree       aca85288d10022ac2ccc45931a769e2932b971ee
parent     2c5b7e1be74ff0175dedbbd325abe9f0dbbb09ae
userfaultfd: selftest
This test allocates two virtual areas and bounces the physical memory
across the two virtual areas using only userfaultfd.
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Cc: Pavel Emelyanov <xemul@parallels.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Shuah Khan <shuah.kh@samsung.com>
Cc: Shuah Khan <shuahkh@osg.samsung.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--   tools/testing/selftests/vm/Makefile      |   3
-rwxr-xr-x   tools/testing/selftests/vm/run_vmtests   |  11
-rw-r--r--   tools/testing/selftests/vm/userfaultfd.c | 636
3 files changed, 650 insertions, 0 deletions
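For context (not part of the commit), here is a minimal sketch of the userfaultfd handshake that this selftest stresses: open the fd, perform the UFFDIO_API handshake, register an anonymous range in MISSING mode, and resolve a missing-page fault with UFFDIO_COPY from a handler thread. SYS_userfaultfd and <linux/userfaultfd.h> assume a libc and kernel headers that already ship them; otherwise use the __NR_userfaultfd values from the #ifdef block in the patch below. Error handling is trimmed.

/*
 * Minimal userfaultfd handshake sketch (illustrative, not part of the patch):
 * register an anonymous area, then resolve a single missing-page fault
 * with UFFDIO_COPY from a handler thread.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/userfaultfd.h>	/* assumes installed uapi headers */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

static int uffd;
static long page_size;

static void *fault_handler(void *arg)
{
	struct uffd_msg msg;
	struct uffdio_copy copy;
	char *src;

	(void) arg;
	/* blocking read: one message per missing-page fault */
	if (read(uffd, &msg, sizeof(msg)) != sizeof(msg) ||
	    msg.event != UFFD_EVENT_PAGEFAULT)
		exit(1);

	/* build the page contents somewhere else, then copy it in atomically */
	src = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	memset(src, 0xaa, page_size);

	copy.dst = msg.arg.pagefault.address & ~(page_size - 1);
	copy.src = (unsigned long) src;
	copy.len = page_size;
	copy.mode = 0;
	copy.copy = 0;
	if (ioctl(uffd, UFFDIO_COPY, &copy))	/* wakes up the faulting thread */
		exit(1);
	return NULL;
}

int main(void)
{
	struct uffdio_api api = { .api = UFFD_API };	/* features must be 0 */
	struct uffdio_register reg;
	pthread_t thr;
	char *area;

	page_size = sysconf(_SC_PAGE_SIZE);
	uffd = syscall(SYS_userfaultfd, O_CLOEXEC);
	if (uffd < 0 || ioctl(uffd, UFFDIO_API, &api))
		return 1;

	area = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	reg.range.start = (unsigned long) area;
	reg.range.len = page_size;
	reg.mode = UFFDIO_REGISTER_MODE_MISSING;
	if (ioctl(uffd, UFFDIO_REGISTER, &reg))
		return 1;

	pthread_create(&thr, NULL, fault_handler, NULL);
	/* first touch faults and blocks until the handler's UFFDIO_COPY */
	printf("first byte: 0x%x\n", area[0] & 0xff);
	pthread_join(thr, NULL);
	return 0;
}

The selftest below does the same thing per CPU, but with racing faulting, reading and copying threads, so that the atomicity guarantees of UFFDIO_COPY are actually exercised.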
diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
index 231b9a031f6a..0d6854744b37 100644
--- a/tools/testing/selftests/vm/Makefile
+++ b/tools/testing/selftests/vm/Makefile
@@ -8,10 +8,13 @@ BINARIES += hugetlbfstest
 BINARIES += map_hugetlb
 BINARIES += thuge-gen
 BINARIES += transhuge-stress
+BINARIES += userfaultfd
 
 all: $(BINARIES)
 %: %.c
 	$(CC) $(CFLAGS) -o $@ $^ -lrt
+userfaultfd: userfaultfd.c
+	$(CC) $(CFLAGS) -O2 -o $@ $^ -lpthread
 
 TEST_PROGS := run_vmtests
 TEST_FILES := $(BINARIES)
diff --git a/tools/testing/selftests/vm/run_vmtests b/tools/testing/selftests/vm/run_vmtests
index 49ece11ff7fd..831adeb5fc55 100755
--- a/tools/testing/selftests/vm/run_vmtests
+++ b/tools/testing/selftests/vm/run_vmtests
@@ -86,6 +86,17 @@ else
 	echo "[PASS]"
 fi
 
+echo "--------------------"
+echo "running userfaultfd"
+echo "--------------------"
+./userfaultfd 128 32
+if [ $? -ne 0 ]; then
+	echo "[FAIL]"
+	exitcode=1
+else
+	echo "[PASS]"
+fi
+
 #cleanup
 umount $mnt
 rm -rf $mnt
diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c
new file mode 100644
index 000000000000..0c0b83953352
--- /dev/null
+++ b/tools/testing/selftests/vm/userfaultfd.c
@@ -0,0 +1,636 @@
+/*
+ * Stress userfaultfd syscall.
+ *
+ * Copyright (C) 2015 Red Hat, Inc.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ * This test allocates two virtual areas and bounces the physical
+ * memory across the two virtual areas (from area_src to area_dst)
+ * using userfaultfd.
+ *
+ * There are three threads running per CPU:
+ *
+ * 1) one per-CPU thread takes a per-page pthread_mutex in a random
+ *    page of the area_dst (while the physical page may still be in
+ *    area_src), and increments a per-page counter in the same page,
+ *    and checks its value against a verification region.
+ *
+ * 2) another per-CPU thread handles the userfaults generated by
+ *    thread 1 above. userfaultfd blocking reads or poll() modes are
+ *    exercised interleaved.
+ *
+ * 3) one last per-CPU thread transfers the memory in the background
+ *    at maximum bandwidth (if not already transferred by thread
+ *    2). Each CPU thread takes care of transferring a portion of the
+ *    area.
+ *
+ * When all threads of type 3 have completed the transfer, one
+ * bounce is complete. area_src and area_dst are then swapped. All
+ * threads are respawned and so the bounce is immediately restarted
+ * in the opposite direction.
+ *
+ * The per-CPU threads of type 1, by triggering userfaults inside
+ * pthread_mutex_lock, also verify the atomicity of the memory
+ * transfer (UFFDIO_COPY).
+ *
+ * The program takes two parameters: the amount of physical memory
+ * in MiB for the area and the number of bounces to execute.
+ *
+ * # 100MiB 99999 bounces
+ * ./userfaultfd 100 99999
+ *
+ * # 1GiB 99 bounces
+ * ./userfaultfd 1000 99
+ *
+ * # 10MiB-~6GiB 999 bounces, continue forever unless an error triggers
+ * while ./userfaultfd $[RANDOM % 6000 + 10] 999; do true; done
+ */
+
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <errno.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <time.h>
+#include <signal.h>
+#include <poll.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/syscall.h>
+#include <sys/ioctl.h>
+#include <pthread.h>
+#include "../../../../include/uapi/linux/userfaultfd.h"
+
+#ifdef __x86_64__
+#define __NR_userfaultfd 323
+#elif defined(__i386__)
+#define __NR_userfaultfd 359
+#elif defined(__powerpc__)
+#define __NR_userfaultfd 364
+#else
+#error "missing __NR_userfaultfd definition"
+#endif
+
+static unsigned long nr_cpus, nr_pages, nr_pages_per_cpu, page_size;
+
+#define BOUNCE_RANDOM		(1<<0)
+#define BOUNCE_RACINGFAULTS	(1<<1)
+#define BOUNCE_VERIFY		(1<<2)
+#define BOUNCE_POLL		(1<<3)
+static int bounces;
+
+static unsigned long long *count_verify;
+static int uffd, finished, *pipefd;
+static char *area_src, *area_dst;
+static char *zeropage;
+pthread_attr_t attr;
+
+/* pthread_mutex_t starts at page offset 0 */
+#define area_mutex(___area, ___nr)					\
+	((pthread_mutex_t *) ((___area) + (___nr)*page_size))
+/*
+ * count is placed in the page after pthread_mutex_t, naturally aligned
+ * to avoid unaligned-access faults on non-x86 archs.
+ */
+#define area_count(___area, ___nr)					\
+	((volatile unsigned long long *) ((unsigned long)		\
+				 ((___area) + (___nr)*page_size +	\
+				  sizeof(pthread_mutex_t) +		\
+				  sizeof(unsigned long long) - 1) &	\
+				 ~(unsigned long)(sizeof(unsigned long long) \
+						  - 1)))
+
+static int my_bcmp(char *str1, char *str2, size_t n)
+{
+	unsigned long i;
+	for (i = 0; i < n; i++)
+		if (str1[i] != str2[i])
+			return 1;
+	return 0;
+}
+
+static void *locking_thread(void *arg)
+{
+	unsigned long cpu = (unsigned long) arg;
+	struct random_data rand;
+	unsigned long page_nr = *(&(page_nr)); /* silence uninitialized warning */
+	int32_t rand_nr;
+	unsigned long long count;
+	char randstate[64];
+	unsigned int seed;
+	time_t start;
+
+	if (bounces & BOUNCE_RANDOM) {
+		seed = (unsigned int) time(NULL) - bounces;
+		if (!(bounces & BOUNCE_RACINGFAULTS))
+			seed += cpu;
+		bzero(&rand, sizeof(rand));
+		bzero(&randstate, sizeof(randstate));
+		if (initstate_r(seed, randstate, sizeof(randstate), &rand))
+			fprintf(stderr, "initstate_r error\n"), exit(1);
+	} else {
+		page_nr = -bounces;
+		if (!(bounces & BOUNCE_RACINGFAULTS))
+			page_nr += cpu * nr_pages_per_cpu;
+	}
+
+	while (!finished) {
+		if (bounces & BOUNCE_RANDOM) {
+			if (random_r(&rand, &rand_nr))
+				fprintf(stderr, "random_r 1 error\n"), exit(1);
+			page_nr = rand_nr;
+			if (sizeof(page_nr) > sizeof(rand_nr)) {
+				if (random_r(&rand, &rand_nr))
+					fprintf(stderr, "random_r 2 error\n"), exit(1);
+				page_nr |= ((unsigned long) rand_nr) << 32;
+			}
+		} else
+			page_nr += 1;
+		page_nr %= nr_pages;
+
+		start = time(NULL);
+		if (bounces & BOUNCE_VERIFY) {
+			count = *area_count(area_dst, page_nr);
+			if (!count)
+				fprintf(stderr,
+					"page_nr %lu wrong count %Lu %Lu\n",
+					page_nr, count,
+					count_verify[page_nr]), exit(1);
+
+
+			/*
+			 * We can't use bcmp (or memcmp) because that
+			 * returns 0 erroneously if the memory is
+			 * changing under it (even if the end of the
+			 * page is never changing and always
+			 * different).
+			 */
+#if 1
+			if (!my_bcmp(area_dst + page_nr * page_size, zeropage,
+				     page_size))
+				fprintf(stderr,
+					"my_bcmp page_nr %lu wrong count %Lu %Lu\n",
+					page_nr, count,
+					count_verify[page_nr]), exit(1);
+#else
+			unsigned long loops;
+
+			loops = 0;
+			/* uncomment the line below to test with mutex */
+			/* pthread_mutex_lock(area_mutex(area_dst, page_nr)); */
+			while (!bcmp(area_dst + page_nr * page_size, zeropage,
+				     page_size)) {
+				loops += 1;
+				if (loops > 10)
+					break;
+			}
+			/* uncomment the line below to test with mutex */
+			/* pthread_mutex_unlock(area_mutex(area_dst, page_nr)); */
+			if (loops) {
+				fprintf(stderr,
+					"page_nr %lu all zero thread %lu %p %lu\n",
+					page_nr, cpu, area_dst + page_nr * page_size,
+					loops);
+				if (loops > 10)
+					exit(1);
+			}
+#endif
+		}
+
+		pthread_mutex_lock(area_mutex(area_dst, page_nr));
+		count = *area_count(area_dst, page_nr);
+		if (count != count_verify[page_nr]) {
+			fprintf(stderr,
+				"page_nr %lu memory corruption %Lu %Lu\n",
+				page_nr, count,
+				count_verify[page_nr]), exit(1);
+		}
+		count++;
+		*area_count(area_dst, page_nr) = count_verify[page_nr] = count;
+		pthread_mutex_unlock(area_mutex(area_dst, page_nr));
+
+		if (time(NULL) - start > 1)
+			fprintf(stderr,
+				"userfault too slow %ld "
+				"possible false positive with overcommit\n",
+				time(NULL) - start);
+	}
+
+	return NULL;
+}
+
+static int copy_page(unsigned long offset)
+{
+	struct uffdio_copy uffdio_copy;
+
+	if (offset >= nr_pages * page_size)
+		fprintf(stderr, "unexpected offset %lu\n",
+			offset), exit(1);
+	uffdio_copy.dst = (unsigned long) area_dst + offset;
+	uffdio_copy.src = (unsigned long) area_src + offset;
+	uffdio_copy.len = page_size;
+	uffdio_copy.mode = 0;
+	uffdio_copy.copy = 0;
+	if (ioctl(uffd, UFFDIO_COPY, &uffdio_copy)) {
+		/* real retval in uffdio_copy.copy */
+		if (uffdio_copy.copy != -EEXIST)
+			fprintf(stderr, "UFFDIO_COPY error %Ld\n",
+				uffdio_copy.copy), exit(1);
+	} else if (uffdio_copy.copy != page_size) {
+		fprintf(stderr, "UFFDIO_COPY unexpected copy %Ld\n",
+			uffdio_copy.copy), exit(1);
+	} else
+		return 1;
+	return 0;
+}
+
+static void *uffd_poll_thread(void *arg)
+{
+	unsigned long cpu = (unsigned long) arg;
+	struct pollfd pollfd[2];
+	struct uffd_msg msg;
+	int ret;
+	unsigned long offset;
+	char tmp_chr;
+	unsigned long userfaults = 0;
+
+	pollfd[0].fd = uffd;
+	pollfd[0].events = POLLIN;
+	pollfd[1].fd = pipefd[cpu*2];
+	pollfd[1].events = POLLIN;
+
+	for (;;) {
+		ret = poll(pollfd, 2, -1);
+		if (!ret)
+			fprintf(stderr, "poll error %d\n", ret), exit(1);
+		if (ret < 0)
+			perror("poll"), exit(1);
+		if (pollfd[1].revents & POLLIN) {
+			if (read(pollfd[1].fd, &tmp_chr, 1) != 1)
+				fprintf(stderr, "read pipefd error\n"),
+					exit(1);
+			break;
+		}
+		if (!(pollfd[0].revents & POLLIN))
+			fprintf(stderr, "pollfd[0].revents %d\n",
+				pollfd[0].revents), exit(1);
+		ret = read(uffd, &msg, sizeof(msg));
+		if (ret < 0) {
+			if (errno == EAGAIN)
+				continue;
+			perror("nonblocking read error"), exit(1);
+		}
+		if (msg.event != UFFD_EVENT_PAGEFAULT)
+			fprintf(stderr, "unexpected msg event %u\n",
+				msg.event), exit(1);
+		if (msg.arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_WRITE)
+			fprintf(stderr, "unexpected write fault\n"), exit(1);
+		offset = (char *)msg.arg.pagefault.address - area_dst;
+		offset &= ~(page_size-1);
+		if (copy_page(offset))
+			userfaults++;
+	}
+	return (void *)userfaults;
+}
+
+pthread_mutex_t uffd_read_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+static void *uffd_read_thread(void *arg)
+{
+	unsigned long *this_cpu_userfaults;
+	struct uffd_msg msg;
+	unsigned long offset;
+	int ret;
+
+	this_cpu_userfaults = (unsigned long *) arg;
+	*this_cpu_userfaults = 0;
+
+	pthread_mutex_unlock(&uffd_read_mutex);
+	/* from here cancellation is ok */
+
+	for (;;) {
+		ret = read(uffd, &msg, sizeof(msg));
+		if (ret != sizeof(msg)) {
+			if (ret < 0)
+				perror("blocking read error"), exit(1);
+			else
+				fprintf(stderr, "short read\n"), exit(1);
+		}
+		if (msg.event != UFFD_EVENT_PAGEFAULT)
+			fprintf(stderr, "unexpected msg event %u\n",
+				msg.event), exit(1);
+		if (bounces & BOUNCE_VERIFY &&
+		    msg.arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_WRITE)
+			fprintf(stderr, "unexpected write fault\n"), exit(1);
+		offset = (char *)msg.arg.pagefault.address - area_dst;
+		offset &= ~(page_size-1);
+		if (copy_page(offset))
+			(*this_cpu_userfaults)++;
+	}
+	return (void *)NULL;
+}
+
+static void *background_thread(void *arg)
+{
+	unsigned long cpu = (unsigned long) arg;
+	unsigned long page_nr;
+
+	for (page_nr = cpu * nr_pages_per_cpu;
+	     page_nr < (cpu+1) * nr_pages_per_cpu;
+	     page_nr++)
+		copy_page(page_nr * page_size);
+
+	return NULL;
+}
+
+static int stress(unsigned long *userfaults)
+{
+	unsigned long cpu;
+	pthread_t locking_threads[nr_cpus];
+	pthread_t uffd_threads[nr_cpus];
+	pthread_t background_threads[nr_cpus];
+	void **_userfaults = (void **) userfaults;
+
+	finished = 0;
+	for (cpu = 0; cpu < nr_cpus; cpu++) {
+		if (pthread_create(&locking_threads[cpu], &attr,
+				   locking_thread, (void *)cpu))
+			return 1;
+		if (bounces & BOUNCE_POLL) {
+			if (pthread_create(&uffd_threads[cpu], &attr,
+					   uffd_poll_thread, (void *)cpu))
+				return 1;
+		} else {
+			if (pthread_create(&uffd_threads[cpu], &attr,
+					   uffd_read_thread,
+					   &_userfaults[cpu]))
+				return 1;
+			pthread_mutex_lock(&uffd_read_mutex);
+		}
+		if (pthread_create(&background_threads[cpu], &attr,
+				   background_thread, (void *)cpu))
+			return 1;
+	}
+	for (cpu = 0; cpu < nr_cpus; cpu++)
+		if (pthread_join(background_threads[cpu], NULL))
+			return 1;
+
+	/*
+	 * Be strict and immediately zap area_src, the whole area has
+	 * been transferred already by the background threads. The
+	 * area_src could then be faulted in in a racy way by still
+	 * running uffd_threads reading zeropages after we zapped
+	 * area_src (but they're guaranteed to get -EEXIST from
+	 * UFFDIO_COPY without writing zero pages into area_dst
+	 * because the background threads already completed).
+	 */
+	if (madvise(area_src, nr_pages * page_size, MADV_DONTNEED)) {
+		perror("madvise");
+		return 1;
+	}
+
+	for (cpu = 0; cpu < nr_cpus; cpu++) {
+		char c;
+		if (bounces & BOUNCE_POLL) {
+			if (write(pipefd[cpu*2+1], &c, 1) != 1) {
+				fprintf(stderr, "pipefd write error\n");
+				return 1;
+			}
+			if (pthread_join(uffd_threads[cpu], &_userfaults[cpu]))
+				return 1;
+		} else {
+			if (pthread_cancel(uffd_threads[cpu]))
+				return 1;
+			if (pthread_join(uffd_threads[cpu], NULL))
+				return 1;
+		}
+	}
+
+	finished = 1;
+	for (cpu = 0; cpu < nr_cpus; cpu++)
+		if (pthread_join(locking_threads[cpu], NULL))
+			return 1;
+
+	return 0;
+}
+
+static int userfaultfd_stress(void)
+{
+	void *area;
+	char *tmp_area;
+	unsigned long nr;
+	struct uffdio_register uffdio_register;
+	struct uffdio_api uffdio_api;
+	unsigned long cpu;
+	int uffd_flags;
+	unsigned long userfaults[nr_cpus];
+
+	if (posix_memalign(&area, page_size, nr_pages * page_size)) {
+		fprintf(stderr, "out of memory\n");
+		return 1;
+	}
+	area_src = area;
+	if (posix_memalign(&area, page_size, nr_pages * page_size)) {
+		fprintf(stderr, "out of memory\n");
+		return 1;
+	}
+	area_dst = area;
+
+	uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
+	if (uffd < 0) {
+		fprintf(stderr,
+			"userfaultfd syscall not available in this kernel\n");
+		return 1;
+	}
+	uffd_flags = fcntl(uffd, F_GETFD, NULL);
+
+	uffdio_api.api = UFFD_API;
+	uffdio_api.features = 0;
+	if (ioctl(uffd, UFFDIO_API, &uffdio_api)) {
+		fprintf(stderr, "UFFDIO_API\n");
+		return 1;
+	}
+	if (uffdio_api.api != UFFD_API) {
+		fprintf(stderr, "UFFDIO_API error %Lu\n", uffdio_api.api);
+		return 1;
+	}
+
+	count_verify = malloc(nr_pages * sizeof(unsigned long long));
+	if (!count_verify) {
+		perror("count_verify");
+		return 1;
+	}
+
+	for (nr = 0; nr < nr_pages; nr++) {
+		*area_mutex(area_src, nr) = (pthread_mutex_t)
+			PTHREAD_MUTEX_INITIALIZER;
+		count_verify[nr] = *area_count(area_src, nr) = 1;
+	}
+
+	pipefd = malloc(sizeof(int) * nr_cpus * 2);
+	if (!pipefd) {
+		perror("pipefd");
+		return 1;
+	}
+	for (cpu = 0; cpu < nr_cpus; cpu++) {
+		if (pipe2(&pipefd[cpu*2], O_CLOEXEC | O_NONBLOCK)) {
+			perror("pipe");
+			return 1;
+		}
+	}
+
+	if (posix_memalign(&area, page_size, page_size)) {
+		fprintf(stderr, "out of memory\n");
+		return 1;
+	}
+	zeropage = area;
+	bzero(zeropage, page_size);
+
+	pthread_mutex_lock(&uffd_read_mutex);
+
+	pthread_attr_init(&attr);
+	pthread_attr_setstacksize(&attr, 16*1024*1024);
+
+	while (bounces--) {
+		unsigned long expected_ioctls;
+
+		printf("bounces: %d, mode:", bounces);
+		if (bounces & BOUNCE_RANDOM)
+			printf(" rnd");
+		if (bounces & BOUNCE_RACINGFAULTS)
+			printf(" racing");
+		if (bounces & BOUNCE_VERIFY)
+			printf(" ver");
+		if (bounces & BOUNCE_POLL)
+			printf(" poll");
+		printf(", ");
+		fflush(stdout);
+
+		if (bounces & BOUNCE_POLL)
+			fcntl(uffd, F_SETFL, uffd_flags | O_NONBLOCK);
+		else
+			fcntl(uffd, F_SETFL, uffd_flags & ~O_NONBLOCK);
+
+		/* register */
+		uffdio_register.range.start = (unsigned long) area_dst;
+		uffdio_register.range.len = nr_pages * page_size;
+		uffdio_register.mode = UFFDIO_REGISTER_MODE_MISSING;
+		if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register)) {
+			fprintf(stderr, "register failure\n");
+			return 1;
+		}
+		expected_ioctls = (1 << _UFFDIO_WAKE) |
+				  (1 << _UFFDIO_COPY) |
+				  (1 << _UFFDIO_ZEROPAGE);
+		if ((uffdio_register.ioctls & expected_ioctls) !=
+		    expected_ioctls) {
+			fprintf(stderr,
+				"unexpected missing ioctl for anon memory\n");
+			return 1;
+		}
+
+		/*
+		 * The madvise done previously isn't enough: some
+		 * uffd_thread could have read userfaults (one of
+		 * those already resolved by the background thread)
+		 * and it may be in the process of calling
+		 * UFFDIO_COPY. UFFDIO_COPY will read the zapped
+		 * area_src and it would map a zero page in it (of
+		 * course such a UFFDIO_COPY is perfectly safe as it'd
+		 * return -EEXIST). The problem comes at the next
+		 * bounce though: that racing UFFDIO_COPY would
+		 * generate zeropages in the area_src, so invalidating
+		 * the previous MADV_DONTNEED. Without this additional
+		 * MADV_DONTNEED those zeropage leftovers in the
+		 * area_src would lead to -EEXIST failures during the
+		 * next bounce, effectively leaving a zeropage in the
+		 * area_dst.
+		 *
+		 * Try commenting out this madvise to see the memory
+		 * corruption being caught pretty quickly.
+		 *
+		 * khugepaged is only inhibited from collapsing THPs
+		 * after the UFFDIO_REGISTER, which is another reason
+		 * the MADV_DONTNEED must be issued here.
+		 */
+		if (madvise(area_dst, nr_pages * page_size, MADV_DONTNEED)) {
+			perror("madvise 2");
+			return 1;
+		}
+
+		/* bounce pass */
+		if (stress(userfaults))
+			return 1;
+
+		/* unregister */
+		if (ioctl(uffd, UFFDIO_UNREGISTER, &uffdio_register.range)) {
+			fprintf(stderr, "unregister failure\n");
+			return 1;
+		}
+
+		/* verification */
+		if (bounces & BOUNCE_VERIFY) {
+			for (nr = 0; nr < nr_pages; nr++) {
+				if (my_bcmp(area_dst,
+					    area_dst + nr * page_size,
+					    sizeof(pthread_mutex_t))) {
+					fprintf(stderr,
+						"error mutex 2 %lu\n",
+						nr);
+					bounces = 0;
+				}
+				if (*area_count(area_dst, nr) != count_verify[nr]) {
+					fprintf(stderr,
+						"error area_count %Lu %Lu %lu\n",
+						*area_count(area_src, nr),
+						count_verify[nr],
+						nr);
+					bounces = 0;
+				}
+			}
+		}
+
+		/* prepare next bounce */
+		tmp_area = area_src;
+		area_src = area_dst;
+		area_dst = tmp_area;
+
+		printf("userfaults:");
+		for (cpu = 0; cpu < nr_cpus; cpu++)
+			printf(" %lu", userfaults[cpu]);
+		printf("\n");
+	}
+
+	return 0;
+}
+
+int main(int argc, char **argv)
+{
+	if (argc < 3)
+		fprintf(stderr, "Usage: <MiB> <bounces>\n"), exit(1);
+	nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
+	page_size = sysconf(_SC_PAGE_SIZE);
+	if ((unsigned long) area_count(NULL, 0) + sizeof(unsigned long long) >
+	    page_size)
+		fprintf(stderr, "Impossible to run this test\n"), exit(2);
+	nr_pages_per_cpu = atol(argv[1]) * 1024*1024 / page_size /
+		nr_cpus;
+	if (!nr_pages_per_cpu) {
+		fprintf(stderr, "invalid MiB\n");
+		fprintf(stderr, "Usage: <MiB> <bounces>\n"), exit(1);
+	}
+	bounces = atoi(argv[2]);
+	if (bounces <= 0) {
+		fprintf(stderr, "invalid bounces\n");
+		fprintf(stderr, "Usage: <MiB> <bounces>\n"), exit(1);
+	}
+	nr_pages = nr_pages_per_cpu * nr_cpus;
+	printf("nr_pages: %lu, nr_pages_per_cpu: %lu\n",
+	       nr_pages, nr_pages_per_cpu);
+	return userfaultfd_stress();
+}
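A side note on the area_mutex()/area_count() macros in the new file: each page of the bounced area holds the pthread_mutex_t at offset 0 and the 64-bit counter at the first naturally aligned offset after it, and main() refuses to run if that layout does not fit in one page. The standalone snippet below (not part of the patch) repeats the same arithmetic; the 4096-byte page size is only an example, the test itself queries sysconf(_SC_PAGE_SIZE).

/* Standalone illustration of the per-page layout used by the selftest:
 * mutex at offset 0, counter at the next 8-byte-aligned offset after it. */
#include <pthread.h>
#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096;	/* illustrative; the test uses sysconf() */
	unsigned long mutex_off = 0;	/* pthread_mutex_t starts at page offset 0 */

	/* same arithmetic as the area_count() macro in the patch */
	unsigned long count_off =
		(mutex_off + sizeof(pthread_mutex_t) +
		 sizeof(unsigned long long) - 1) &
		~(unsigned long) (sizeof(unsigned long long) - 1);

	printf("sizeof(pthread_mutex_t) = %zu\n", sizeof(pthread_mutex_t));
	printf("counter offset in page  = %lu\n", count_off);
	/* main() in the selftest performs the equivalent sanity check */
	printf("fits in a page: %s\n",
	       count_off + sizeof(unsigned long long) <= page_size ? "yes" : "no");
	return 0;
}

On x86-64 glibc, for instance, sizeof(pthread_mutex_t) is 40, so the counter lands at offset 40 and easily fits alongside the mutex in a 4 KiB page.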