author    Linus Torvalds <torvalds@linux-foundation.org>  2016-05-16 18:15:17 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2016-05-16 18:15:17 -0400
commit    168f1a7163b37294a0ef33829e1ed54d41e33c42 (patch)
tree      16fa34f24156c28f0a3060d984e98bf4df878f91 /tools
parent    825a3b2605c3aa193e0075d0f9c72e33c17ab16a (diff)
parent    4afd0565552c87f23834db9121dd9cf6955d0b43 (diff)
Merge branch 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 asm updates from Ingo Molnar:
 "The main changes in this cycle were:

   - MSR access API fixes and enhancements (Andy Lutomirski)
   - early exception handling improvements (Andy Lutomirski)
   - user-space FS/GS prctl usage fixes and improvements (Andy Lutomirski)
   - Remove the cpu_has_*() APIs and replace them with equivalents (Borislav Petkov)
   - task switch micro-optimization (Brian Gerst)
   - 32-bit entry code simplification (Denys Vlasenko)
   - enhance PAT handling in emulated CPUs (Toshi Kani)

  ... and lots of other cleanups/fixlets"

* 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (70 commits)
  x86/arch_prctl/64: Restore accidentally removed put_cpu() in ARCH_SET_GS
  x86/entry/32: Remove asmlinkage_protect()
  x86/entry/32: Remove GET_THREAD_INFO() from entry code
  x86/entry, sched/x86: Don't save/restore EFLAGS on task switch
  x86/asm/entry/32: Simplify pushes of zeroed pt_regs->REGs
  selftests/x86/ldt_gdt: Test set_thread_area() deletion of an active segment
  x86/tls: Synchronize segment registers in set_thread_area()
  x86/asm/64: Rename thread_struct's fs and gs to fsbase and gsbase
  x86/arch_prctl/64: Remove FSBASE/GSBASE < 4G optimization
  x86/segments/64: When load_gs_index fails, clear the base
  x86/segments/64: When loadsegment(fs, ...) fails, clear the base
  x86/asm: Make asm/alternative.h safe from assembly
  x86/asm: Stop depending on ptrace.h in alternative.h
  x86/entry: Rename is_{ia32,x32}_task() to in_{ia32,x32}_syscall()
  x86/asm: Make sure verify_cpu() has a good stack
  x86/extable: Add a comment about early exception handlers
  x86/msr: Set the return value to zero when native_rdmsr_safe() fails
  x86/paravirt: Make "unsafe" MSR accesses unsafe even if PARAVIRT=y
  x86/paravirt: Add paravirt_{read,write}_msr()
  x86/msr: Carry on after a non-"safe" MSR access fails
  ...
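For reference, the user-space FS/GS prctl interface mentioned above (and exercised by the new fsgsbase selftest in the diff below) boils down to an arch_prctl(2) round trip. The following is a minimal standalone sketch, not part of the patch; error handling is reduced to a bare exit, and it must be built as a 64-bit binary:

/*
 * Illustrative sketch only: set this thread's GS base via ARCH_SET_GS
 * and read it back via ARCH_GET_GS, the same calls the selftest uses.
 */
#define _GNU_SOURCE
#include <asm/prctl.h>      /* ARCH_SET_GS, ARCH_GET_GS */
#include <sys/syscall.h>    /* SYS_arch_prctl */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned long base = 0;

	/* Ask the kernel to set this thread's GS base... */
	if (syscall(SYS_arch_prctl, ARCH_SET_GS, 0x200000000UL) != 0)
		return 1;
	/* ...and read it back through the same interface. */
	if (syscall(SYS_arch_prctl, ARCH_GET_GS, &base) != 0)
		return 1;
	printf("kernel reports GSBASE = 0x%lx\n", base);
	return 0;
}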
Diffstat (limited to 'tools')
-rw-r--r--  tools/testing/selftests/x86/Makefile   |   1
-rw-r--r--  tools/testing/selftests/x86/fsgsbase.c | 398
-rw-r--r--  tools/testing/selftests/x86/ldt_gdt.c  | 250
3 files changed, 649 insertions(+), 0 deletions(-)
diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile
index b47ebd170690..c73425de3cfe 100644
--- a/tools/testing/selftests/x86/Makefile
+++ b/tools/testing/selftests/x86/Makefile
@@ -9,6 +9,7 @@ TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt ptrace_sysc
 TARGETS_C_32BIT_ONLY := entry_from_vm86 syscall_arg_fault test_syscall_vdso unwind_vdso \
 			test_FCMOV test_FCOMI test_FISTTP \
 			vdso_restorer
+TARGETS_C_64BIT_ONLY := fsgsbase
 
 TARGETS_C_32BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_32BIT_ONLY)
 TARGETS_C_64BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_64BIT_ONLY)
diff --git a/tools/testing/selftests/x86/fsgsbase.c b/tools/testing/selftests/x86/fsgsbase.c
new file mode 100644
index 000000000000..5b2b4b3c634c
--- /dev/null
+++ b/tools/testing/selftests/x86/fsgsbase.c
@@ -0,0 +1,398 @@
+/*
+ * fsgsbase.c, an fsgsbase test
+ * Copyright (c) 2014-2016 Andy Lutomirski
+ * GPL v2
+ */
+
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <string.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+#include <err.h>
+#include <sys/user.h>
+#include <asm/prctl.h>
+#include <sys/prctl.h>
+#include <signal.h>
+#include <limits.h>
+#include <sys/ucontext.h>
+#include <sched.h>
+#include <linux/futex.h>
+#include <pthread.h>
+#include <asm/ldt.h>
+#include <sys/mman.h>
+
+#ifndef __x86_64__
+# error This test is 64-bit only
+#endif
+
+static volatile sig_atomic_t want_segv;
+static volatile unsigned long segv_addr;
+
+static int nerrs;
+
+static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
+		       int flags)
+{
+	struct sigaction sa;
+	memset(&sa, 0, sizeof(sa));
+	sa.sa_sigaction = handler;
+	sa.sa_flags = SA_SIGINFO | flags;
+	sigemptyset(&sa.sa_mask);
+	if (sigaction(sig, &sa, 0))
+		err(1, "sigaction");
+}
+
+static void clearhandler(int sig)
+{
+	struct sigaction sa;
+	memset(&sa, 0, sizeof(sa));
+	sa.sa_handler = SIG_DFL;
+	sigemptyset(&sa.sa_mask);
+	if (sigaction(sig, &sa, 0))
+		err(1, "sigaction");
+}
+
+static void sigsegv(int sig, siginfo_t *si, void *ctx_void)
+{
+	ucontext_t *ctx = (ucontext_t*)ctx_void;
+
+	if (!want_segv) {
+		clearhandler(SIGSEGV);
+		return;  /* Crash cleanly. */
+	}
+
+	want_segv = false;
+	segv_addr = (unsigned long)si->si_addr;
+
+	ctx->uc_mcontext.gregs[REG_RIP] += 4;	/* Skip the faulting mov */
+
+}
+
+enum which_base { FS, GS };
+
+static unsigned long read_base(enum which_base which)
+{
+	unsigned long offset;
+	/*
+	 * Unless we have FSGSBASE, there's no direct way to do this from
+	 * user mode.  We can get at it indirectly using signals, though.
+	 */
+
+	want_segv = true;
+
+	offset = 0;
+	if (which == FS) {
+		/* Use a constant-length instruction here. */
+		asm volatile ("mov %%fs:(%%rcx), %%rax" : : "c" (offset) : "rax");
+	} else {
+		asm volatile ("mov %%gs:(%%rcx), %%rax" : : "c" (offset) : "rax");
+	}
+	if (!want_segv)
+		return segv_addr + offset;
+
+	/*
+	 * If that didn't segfault, try the other end of the address space.
+	 * Unless we get really unlucky and run into the vsyscall page, this
+	 * is guaranteed to segfault.
+	 */
+
+	offset = (ULONG_MAX >> 1) + 1;
+	if (which == FS) {
+		asm volatile ("mov %%fs:(%%rcx), %%rax"
+			      : : "c" (offset) : "rax");
+	} else {
+		asm volatile ("mov %%gs:(%%rcx), %%rax"
+			      : : "c" (offset) : "rax");
+	}
+	if (!want_segv)
+		return segv_addr + offset;
+
+	abort();
+}
+
+static void check_gs_value(unsigned long value)
+{
+	unsigned long base;
+	unsigned short sel;
+
+	printf("[RUN]\tARCH_SET_GS to 0x%lx\n", value);
+	if (syscall(SYS_arch_prctl, ARCH_SET_GS, value) != 0)
+		err(1, "ARCH_SET_GS");
+
+	asm volatile ("mov %%gs, %0" : "=rm" (sel));
+	base = read_base(GS);
+	if (base == value) {
+		printf("[OK]\tGSBASE was set as expected (selector 0x%hx)\n",
+		       sel);
+	} else {
+		nerrs++;
+		printf("[FAIL]\tGSBASE was not as expected: got 0x%lx (selector 0x%hx)\n",
+		       base, sel);
+	}
+
+	if (syscall(SYS_arch_prctl, ARCH_GET_GS, &base) != 0)
+		err(1, "ARCH_GET_GS");
+	if (base == value) {
+		printf("[OK]\tARCH_GET_GS worked as expected (selector 0x%hx)\n",
+		       sel);
+	} else {
+		nerrs++;
+		printf("[FAIL]\tARCH_GET_GS was not as expected: got 0x%lx (selector 0x%hx)\n",
+		       base, sel);
+	}
+}
+
+static void mov_0_gs(unsigned long initial_base, bool schedule)
+{
+	unsigned long base, arch_base;
+
+	printf("[RUN]\tARCH_SET_GS to 0x%lx then mov 0 to %%gs%s\n", initial_base, schedule ? " and schedule " : "");
+	if (syscall(SYS_arch_prctl, ARCH_SET_GS, initial_base) != 0)
+		err(1, "ARCH_SET_GS");
+
+	if (schedule)
+		usleep(10);
+
+	asm volatile ("mov %0, %%gs" : : "rm" (0));
+	base = read_base(GS);
+	if (syscall(SYS_arch_prctl, ARCH_GET_GS, &arch_base) != 0)
+		err(1, "ARCH_GET_GS");
+	if (base == arch_base) {
+		printf("[OK]\tGSBASE is 0x%lx\n", base);
+	} else {
+		nerrs++;
+		printf("[FAIL]\tGSBASE changed to 0x%lx but kernel reports 0x%lx\n", base, arch_base);
+	}
+}
+
+static volatile unsigned long remote_base;
+static volatile bool remote_hard_zero;
+static volatile unsigned int ftx;
+
+/*
+ * ARCH_SET_FS/GS(0) may or may not program a selector of zero.  HARD_ZERO
+ * means to force the selector to zero to improve test coverage.
+ */
+#define HARD_ZERO 0xa1fa5f343cb85fa4
+
+static void do_remote_base()
+{
+	unsigned long to_set = remote_base;
+	bool hard_zero = false;
+	if (to_set == HARD_ZERO) {
+		to_set = 0;
+		hard_zero = true;
+	}
+
+	if (syscall(SYS_arch_prctl, ARCH_SET_GS, to_set) != 0)
+		err(1, "ARCH_SET_GS");
+
+	if (hard_zero)
+		asm volatile ("mov %0, %%gs" : : "rm" ((unsigned short)0));
+
+	unsigned short sel;
+	asm volatile ("mov %%gs, %0" : "=rm" (sel));
+	printf("\tother thread: ARCH_SET_GS(0x%lx)%s -- sel is 0x%hx\n",
+	       to_set, hard_zero ? " and clear gs" : "", sel);
+}
+
+void do_unexpected_base(void)
+{
+	/*
+	 * The goal here is to try to arrange for GS == 0, GSBASE !=
+	 * 0, and for the kernel to think that GSBASE == 0.
+	 *
+	 * To make the test as reliable as possible, this uses
+	 * explicit descriptors.  (This is not the only way.  This
+	 * could use ARCH_SET_GS with a low, nonzero base, but the
+	 * relevant side effect of ARCH_SET_GS could change.)
+	 */
+
+	/* Step 1: tell the kernel that we have GSBASE == 0. */
+	if (syscall(SYS_arch_prctl, ARCH_SET_GS, 0) != 0)
+		err(1, "ARCH_SET_GS");
+
+	/* Step 2: change GSBASE without telling the kernel. */
+	struct user_desc desc = {
+		.entry_number = 0,
+		.base_addr = 0xBAADF00D,
+		.limit = 0xfffff,
+		.seg_32bit = 1,
+		.contents = 0, /* Data, grow-up */
+		.read_exec_only = 0,
+		.limit_in_pages = 1,
+		.seg_not_present = 0,
+		.useable = 0
+	};
+	if (syscall(SYS_modify_ldt, 1, &desc, sizeof(desc)) == 0) {
+		printf("\tother thread: using LDT slot 0\n");
+		asm volatile ("mov %0, %%gs" : : "rm" ((unsigned short)0x7));
+	} else {
+		/* No modify_ldt for us (configured out, perhaps) */
+
+		struct user_desc *low_desc = mmap(
+			NULL, sizeof(desc),
+			PROT_READ | PROT_WRITE,
+			MAP_PRIVATE | MAP_ANONYMOUS | MAP_32BIT, -1, 0);
+		memcpy(low_desc, &desc, sizeof(desc));
+
+		low_desc->entry_number = -1;
+
+		/* 32-bit set_thread_area */
+		long ret;
+		asm volatile ("int $0x80"
+			      : "=a" (ret) : "a" (243), "b" (low_desc)
+			      : "flags");
+		memcpy(&desc, low_desc, sizeof(desc));
+		munmap(low_desc, sizeof(desc));
+
+		if (ret != 0) {
+			printf("[NOTE]\tcould not create a segment -- test won't do anything\n");
+			return;
+		}
+		printf("\tother thread: using GDT slot %d\n", desc.entry_number);
+		asm volatile ("mov %0, %%gs" : : "rm" ((unsigned short)((desc.entry_number << 3) | 0x3)));
+	}
+
+	/*
+	 * Step 3: set the selector back to zero.  On AMD chips, this will
+	 * preserve GSBASE.
+	 */
+
+	asm volatile ("mov %0, %%gs" : : "rm" ((unsigned short)0));
+}
+
+static void *threadproc(void *ctx)
+{
+	while (1) {
+		while (ftx == 0)
+			syscall(SYS_futex, &ftx, FUTEX_WAIT, 0, NULL, NULL, 0);
+		if (ftx == 3)
+			return NULL;
+
+		if (ftx == 1)
+			do_remote_base();
+		else if (ftx == 2)
+			do_unexpected_base();
+		else
+			errx(1, "helper thread got bad command");
+
+		ftx = 0;
+		syscall(SYS_futex, &ftx, FUTEX_WAKE, 0, NULL, NULL, 0);
+	}
+}
+
+static void set_gs_and_switch_to(unsigned long local, unsigned long remote)
+{
+	unsigned long base;
+
+	bool hard_zero = false;
+	if (local == HARD_ZERO) {
+		hard_zero = true;
+		local = 0;
+	}
+
+	printf("[RUN]\tARCH_SET_GS(0x%lx)%s, then schedule to 0x%lx\n",
+	       local, hard_zero ? " and clear gs" : "", remote);
+	if (syscall(SYS_arch_prctl, ARCH_SET_GS, local) != 0)
+		err(1, "ARCH_SET_GS");
+	if (hard_zero)
+		asm volatile ("mov %0, %%gs" : : "rm" ((unsigned short)0));
+
+	if (read_base(GS) != local) {
+		nerrs++;
+		printf("[FAIL]\tGSBASE wasn't set as expected\n");
+	}
+
+	remote_base = remote;
+	ftx = 1;
+	syscall(SYS_futex, &ftx, FUTEX_WAKE, 0, NULL, NULL, 0);
+	while (ftx != 0)
+		syscall(SYS_futex, &ftx, FUTEX_WAIT, 1, NULL, NULL, 0);
+
+	base = read_base(GS);
+	if (base == local) {
+		printf("[OK]\tGSBASE remained 0x%lx\n", local);
+	} else {
+		nerrs++;
+		printf("[FAIL]\tGSBASE changed to 0x%lx\n", base);
+	}
+}
+
+static void test_unexpected_base(void)
+{
+	unsigned long base;
+
+	printf("[RUN]\tARCH_SET_GS(0), clear gs, then manipulate GSBASE in a different thread\n");
+	if (syscall(SYS_arch_prctl, ARCH_SET_GS, 0) != 0)
+		err(1, "ARCH_SET_GS");
+	asm volatile ("mov %0, %%gs" : : "rm" ((unsigned short)0));
+
+	ftx = 2;
+	syscall(SYS_futex, &ftx, FUTEX_WAKE, 0, NULL, NULL, 0);
+	while (ftx != 0)
+		syscall(SYS_futex, &ftx, FUTEX_WAIT, 1, NULL, NULL, 0);
+
+	base = read_base(GS);
+	if (base == 0) {
+		printf("[OK]\tGSBASE remained 0\n");
+	} else {
+		nerrs++;
+		printf("[FAIL]\tGSBASE changed to 0x%lx\n", base);
+	}
+}
+
+int main()
+{
+	pthread_t thread;
+
+	sethandler(SIGSEGV, sigsegv, 0);
+
+	check_gs_value(0);
+	check_gs_value(1);
+	check_gs_value(0x200000000);
+	check_gs_value(0);
+	check_gs_value(0x200000000);
+	check_gs_value(1);
+
+	for (int sched = 0; sched < 2; sched++) {
+		mov_0_gs(0, !!sched);
+		mov_0_gs(1, !!sched);
+		mov_0_gs(0x200000000, !!sched);
+	}
+
+	/* Set up for multithreading. */
+
+	cpu_set_t cpuset;
+	CPU_ZERO(&cpuset);
+	CPU_SET(0, &cpuset);
+	if (sched_setaffinity(0, sizeof(cpuset), &cpuset) != 0)
+		err(1, "sched_setaffinity to CPU 0");	/* should never fail */
+
+	if (pthread_create(&thread, 0, threadproc, 0) != 0)
+		err(1, "pthread_create");
+
+	static unsigned long bases_with_hard_zero[] = {
+		0, HARD_ZERO, 1, 0x200000000,
+	};
+
+	for (int local = 0; local < 4; local++) {
+		for (int remote = 0; remote < 4; remote++) {
+			set_gs_and_switch_to(bases_with_hard_zero[local],
+					     bases_with_hard_zero[remote]);
+		}
+	}
+
+	test_unexpected_base();
+
+	ftx = 3;  /* Kill the thread. */
+	syscall(SYS_futex, &ftx, FUTEX_WAKE, 0, NULL, NULL, 0);
+
+	if (pthread_join(thread, NULL) != 0)
+		err(1, "pthread_join");
+
+	return nerrs == 0 ? 0 : 1;
+}
diff --git a/tools/testing/selftests/x86/ldt_gdt.c b/tools/testing/selftests/x86/ldt_gdt.c
index 31a3035cd4eb..4af47079cf04 100644
--- a/tools/testing/selftests/x86/ldt_gdt.c
+++ b/tools/testing/selftests/x86/ldt_gdt.c
@@ -21,6 +21,9 @@
 #include <pthread.h>
 #include <sched.h>
 #include <linux/futex.h>
+#include <sys/mman.h>
+#include <asm/prctl.h>
+#include <sys/prctl.h>
 
 #define AR_ACCESSED (1<<8)
 
@@ -44,6 +47,12 @@
 
 static int nerrs;
 
+/* Points to an array of 1024 ints, each holding its own index. */
+static const unsigned int *counter_page;
+static struct user_desc *low_user_desc;
+static struct user_desc *low_user_desc_clear;  /* Use to delete GDT entry */
+static int gdt_entry_num;
+
 static void check_invalid_segment(uint16_t index, int ldt)
 {
 	uint32_t has_limit = 0, has_ar = 0, limit, ar;
@@ -561,16 +570,257 @@ static void do_exec_test(void)
 	}
 }
 
+static void setup_counter_page(void)
+{
+	unsigned int *page = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
+				  MAP_ANONYMOUS | MAP_PRIVATE | MAP_32BIT, -1, 0);
+	if (page == MAP_FAILED)
+		err(1, "mmap");
+
+	for (int i = 0; i < 1024; i++)
+		page[i] = i;
+	counter_page = page;
+}
+
+static int invoke_set_thread_area(void)
+{
+	int ret;
+	asm volatile ("int $0x80"
+		      : "=a" (ret), "+m" (low_user_desc) :
+			"a" (243), "b" (low_user_desc)
+		      : "flags");
+	return ret;
+}
+
+static void setup_low_user_desc(void)
+{
+	low_user_desc = mmap(NULL, 2 * sizeof(struct user_desc),
+			     PROT_READ | PROT_WRITE,
+			     MAP_ANONYMOUS | MAP_PRIVATE | MAP_32BIT, -1, 0);
+	if (low_user_desc == MAP_FAILED)
+		err(1, "mmap");
+
+	low_user_desc->entry_number = -1;
+	low_user_desc->base_addr = (unsigned long)&counter_page[1];
+	low_user_desc->limit = 0xfffff;
+	low_user_desc->seg_32bit = 1;
+	low_user_desc->contents = 0; /* Data, grow-up */
+	low_user_desc->read_exec_only = 0;
+	low_user_desc->limit_in_pages = 1;
+	low_user_desc->seg_not_present = 0;
+	low_user_desc->useable = 0;
+
+	if (invoke_set_thread_area() == 0) {
+		gdt_entry_num = low_user_desc->entry_number;
+		printf("[NOTE]\tset_thread_area is available; will use GDT index %d\n", gdt_entry_num);
+	} else {
+		printf("[NOTE]\tset_thread_area is unavailable\n");
+	}
+
+	low_user_desc_clear = low_user_desc + 1;
+	low_user_desc_clear->entry_number = gdt_entry_num;
+	low_user_desc_clear->read_exec_only = 1;
+	low_user_desc_clear->seg_not_present = 1;
+}
+
+static void test_gdt_invalidation(void)
+{
+	if (!gdt_entry_num)
+		return;	/* 64-bit only system -- we can't use set_thread_area */
+
+	unsigned short prev_sel;
+	unsigned short sel;
+	unsigned int eax;
+	const char *result;
+#ifdef __x86_64__
+	unsigned long saved_base;
+	unsigned long new_base;
+#endif
+
+	/* Test DS */
+	invoke_set_thread_area();
+	eax = 243;
+	sel = (gdt_entry_num << 3) | 3;
+	asm volatile ("movw %%ds, %[prev_sel]\n\t"
+		      "movw %[sel], %%ds\n\t"
+#ifdef __i386__
+		      "pushl %%ebx\n\t"
+#endif
+		      "movl %[arg1], %%ebx\n\t"
+		      "int $0x80\n\t"	/* Should invalidate ds */
+#ifdef __i386__
+		      "popl %%ebx\n\t"
+#endif
+		      "movw %%ds, %[sel]\n\t"
+		      "movw %[prev_sel], %%ds"
+		      : [prev_sel] "=&r" (prev_sel), [sel] "+r" (sel),
+			"+a" (eax)
+		      : "m" (low_user_desc_clear),
+			[arg1] "r" ((unsigned int)(unsigned long)low_user_desc_clear)
+		      : "flags");
+
+	if (sel != 0) {
+		result = "FAIL";
+		nerrs++;
+	} else {
+		result = "OK";
+	}
+	printf("[%s]\tInvalidate DS with set_thread_area: new DS = 0x%hx\n",
+	       result, sel);
+
+	/* Test ES */
+	invoke_set_thread_area();
+	eax = 243;
+	sel = (gdt_entry_num << 3) | 3;
+	asm volatile ("movw %%es, %[prev_sel]\n\t"
+		      "movw %[sel], %%es\n\t"
+#ifdef __i386__
+		      "pushl %%ebx\n\t"
+#endif
+		      "movl %[arg1], %%ebx\n\t"
+		      "int $0x80\n\t"	/* Should invalidate es */
+#ifdef __i386__
+		      "popl %%ebx\n\t"
+#endif
+		      "movw %%es, %[sel]\n\t"
+		      "movw %[prev_sel], %%es"
+		      : [prev_sel] "=&r" (prev_sel), [sel] "+r" (sel),
+			"+a" (eax)
+		      : "m" (low_user_desc_clear),
+			[arg1] "r" ((unsigned int)(unsigned long)low_user_desc_clear)
+		      : "flags");
+
+	if (sel != 0) {
+		result = "FAIL";
+		nerrs++;
+	} else {
+		result = "OK";
+	}
+	printf("[%s]\tInvalidate ES with set_thread_area: new ES = 0x%hx\n",
+	       result, sel);
+
+	/* Test FS */
+	invoke_set_thread_area();
+	eax = 243;
+	sel = (gdt_entry_num << 3) | 3;
+#ifdef __x86_64__
+	syscall(SYS_arch_prctl, ARCH_GET_FS, &saved_base);
+#endif
+	asm volatile ("movw %%fs, %[prev_sel]\n\t"
+		      "movw %[sel], %%fs\n\t"
+#ifdef __i386__
+		      "pushl %%ebx\n\t"
+#endif
+		      "movl %[arg1], %%ebx\n\t"
+		      "int $0x80\n\t"	/* Should invalidate fs */
+#ifdef __i386__
+		      "popl %%ebx\n\t"
+#endif
+		      "movw %%fs, %[sel]\n\t"
+		      : [prev_sel] "=&r" (prev_sel), [sel] "+r" (sel),
+			"+a" (eax)
+		      : "m" (low_user_desc_clear),
+			[arg1] "r" ((unsigned int)(unsigned long)low_user_desc_clear)
+		      : "flags");
+
+#ifdef __x86_64__
+	syscall(SYS_arch_prctl, ARCH_GET_FS, &new_base);
+#endif
+
+	/* Restore FS/BASE for glibc */
+	asm volatile ("movw %[prev_sel], %%fs" : : [prev_sel] "rm" (prev_sel));
+#ifdef __x86_64__
+	if (saved_base)
+		syscall(SYS_arch_prctl, ARCH_SET_FS, saved_base);
+#endif
+
+	if (sel != 0) {
+		result = "FAIL";
+		nerrs++;
+	} else {
+		result = "OK";
+	}
+	printf("[%s]\tInvalidate FS with set_thread_area: new FS = 0x%hx\n",
+	       result, sel);
+
+#ifdef __x86_64__
+	if (sel == 0 && new_base != 0) {
+		nerrs++;
+		printf("[FAIL]\tNew FSBASE was 0x%lx\n", new_base);
+	} else {
+		printf("[OK]\tNew FSBASE was zero\n");
+	}
+#endif
+
+	/* Test GS */
+	invoke_set_thread_area();
+	eax = 243;
+	sel = (gdt_entry_num << 3) | 3;
+#ifdef __x86_64__
+	syscall(SYS_arch_prctl, ARCH_GET_GS, &saved_base);
+#endif
+	asm volatile ("movw %%gs, %[prev_sel]\n\t"
+		      "movw %[sel], %%gs\n\t"
+#ifdef __i386__
+		      "pushl %%ebx\n\t"
+#endif
+		      "movl %[arg1], %%ebx\n\t"
+		      "int $0x80\n\t"	/* Should invalidate gs */
+#ifdef __i386__
+		      "popl %%ebx\n\t"
+#endif
+		      "movw %%gs, %[sel]\n\t"
+		      : [prev_sel] "=&r" (prev_sel), [sel] "+r" (sel),
+			"+a" (eax)
+		      : "m" (low_user_desc_clear),
+			[arg1] "r" ((unsigned int)(unsigned long)low_user_desc_clear)
+		      : "flags");
+
+#ifdef __x86_64__
+	syscall(SYS_arch_prctl, ARCH_GET_GS, &new_base);
+#endif
+
+	/* Restore GS/BASE for glibc */
+	asm volatile ("movw %[prev_sel], %%gs" : : [prev_sel] "rm" (prev_sel));
+#ifdef __x86_64__
+	if (saved_base)
+		syscall(SYS_arch_prctl, ARCH_SET_GS, saved_base);
+#endif
+
+	if (sel != 0) {
+		result = "FAIL";
+		nerrs++;
+	} else {
+		result = "OK";
+	}
+	printf("[%s]\tInvalidate GS with set_thread_area: new GS = 0x%hx\n",
+	       result, sel);
+
+#ifdef __x86_64__
+	if (sel == 0 && new_base != 0) {
+		nerrs++;
+		printf("[FAIL]\tNew GSBASE was 0x%lx\n", new_base);
+	} else {
+		printf("[OK]\tNew GSBASE was zero\n");
+	}
+#endif
+}
+
 int main(int argc, char **argv)
 {
 	if (argc == 1 && !strcmp(argv[0], "ldt_gdt_test_exec"))
 		return finish_exec_test();
 
+	setup_counter_page();
+	setup_low_user_desc();
+
 	do_simple_tests();
 
 	do_multicpu_tests();
 
 	do_exec_test();
 
+	test_gdt_invalidation();
+
 	return nerrs ? 1 : 0;
 }