path: root/arch/x86/include/asm/system.h
Diffstat (limited to 'arch/x86/include/asm/system.h')
-rw-r--r--  arch/x86/include/asm/system.h | 70
1 file changed, 64 insertions(+), 6 deletions(-)
diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
index 8e626ea33a1..643c59b4bc6 100644
--- a/arch/x86/include/asm/system.h
+++ b/arch/x86/include/asm/system.h
@@ -20,9 +20,26 @@
 struct task_struct;	/* one of the stranger aspects of C forward declarations */
 struct task_struct *__switch_to(struct task_struct *prev,
 				struct task_struct *next);
+struct tss_struct;
+void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
+		      struct tss_struct *tss);
 
 #ifdef CONFIG_X86_32
 
+#ifdef CONFIG_CC_STACKPROTECTOR
+#define __switch_canary						\
+	"movl %P[task_canary](%[next]), %%ebx\n\t"		\
+	"movl %%ebx, "__percpu_arg([stack_canary])"\n\t"
+#define __switch_canary_oparam					\
+	, [stack_canary] "=m" (per_cpu_var(stack_canary))
+#define __switch_canary_iparam					\
+	, [task_canary] "i" (offsetof(struct task_struct, stack_canary))
+#else	/* CC_STACKPROTECTOR */
+#define __switch_canary
+#define __switch_canary_oparam
+#define __switch_canary_iparam
+#endif	/* CC_STACKPROTECTOR */
+
 /*
  * Saving eflags is important. It switches not only IOPL between tasks,
  * it also protects other tasks from NT leaking through sysenter etc.
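
[Review note] CONFIG_CC_STACKPROTECTOR makes gcc guard each protected
function with a canary that, on i386, it loads from the fixed TLS slot
%gs:20 in the prologue and re-checks in the epilogue. A hand-written
sketch of what the compiler effectively emits (illustrative only;
__read_gs20() is a stand-in for gcc's hardcoded load, not a real
symbol) shows why the kernel must keep the per-CPU canary current
across context switches:

	extern void __stack_chk_fail(void);	/* called on a detected overwrite */

	static inline unsigned long __read_gs20(void)
	{
		unsigned long v;
		asm("movl %%gs:20, %0" : "=r" (v));	/* gcc's i386 guard slot */
		return v;
	}

	void protected_function(void)
	{
		unsigned long canary = __read_gs20();	/* prologue */
		char buf[64];
		/* ... body, possibly overflowing buf ... */
		if (canary != __read_gs20())		/* epilogue check */
			__stack_chk_fail();
	}

Since %gs:20 must always hold the *running* task's canary, switch_to()
has to refresh the per-CPU stack_canary slot on every switch, which is
what the __switch_canary fragment above does.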
@@ -44,6 +61,7 @@ do { \
 		     "movl %[next_sp],%%esp\n\t"	/* restore ESP */ \
 		     "movl $1f,%[prev_ip]\n\t"		/* save EIP */	\
 		     "pushl %[next_ip]\n\t"		/* restore EIP */ \
+		     __switch_canary					\
 		     "jmp __switch_to\n"	/* regparm call */	\
 		     "1:\t"						\
 		     "popl %%ebp\n\t"		/* restore EBP */	\
@@ -58,6 +76,8 @@ do { \
 		       "=b" (ebx), "=c" (ecx), "=d" (edx),		\
 		       "=S" (esi), "=D" (edi)				\
 									\
+		       __switch_canary_oparam				\
+									\
 		       /* input parameters: */				\
 		     : [next_sp]  "m" (next->thread.sp),		\
 		       [next_ip]  "m" (next->thread.ip),		\
@@ -66,6 +86,8 @@ do { \
 		       [prev]     "a" (prev),				\
 		       [next]     "d" (next)				\
 									\
+		       __switch_canary_iparam				\
+									\
 		     : /* reloaded segment registers */			\
 			"memory");					\
 } while (0)
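
[Review note] The three small hunks above splice the canary hand-off
into the middle of the 32-bit switch_to() asm. Placement matters: the
copy runs after ESP has been switched and before the jmp to
__switch_to, so the very first stack-protected function that returns
on next's stack already sees next's canary. The fragment is free to
use %ebx as scratch because the asm already claims it via the
"=b" (ebx) output. In C the whole addition amounts to one per-CPU
store; percpu_write() below is used purely for exposition (the real
copy cannot live in C, since a protected caller would trip over its
own canary check mid-switch):

	/* C equivalent of __switch_canary on x86_32 (illustrative):
	 *	movl %P[task_canary](%[next]), %%ebx
	 *	movl %%ebx, "__percpu_arg([stack_canary])"
	 */
	percpu_write(stack_canary, next->stack_canary);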
@@ -86,27 +108,44 @@ do { \
 	  , "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11",		\
 	    "r12", "r13", "r14", "r15"
 
+#ifdef CONFIG_CC_STACKPROTECTOR
+#define __switch_canary						  \
+	"movq %P[task_canary](%%rsi),%%r8\n\t"			  \
+	"movq %%r8,"__percpu_arg([gs_canary])"\n\t"
+#define __switch_canary_oparam					  \
+	, [gs_canary] "=m" (per_cpu_var(irq_stack_union.stack_canary))
+#define __switch_canary_iparam					  \
+	, [task_canary] "i" (offsetof(struct task_struct, stack_canary))
+#else	/* CC_STACKPROTECTOR */
+#define __switch_canary
+#define __switch_canary_oparam
+#define __switch_canary_iparam
+#endif	/* CC_STACKPROTECTOR */
+
 /* Save restore flags to clear handle leaking NT */
 #define switch_to(prev, next, last) \
 	asm volatile(SAVE_CONTEXT					  \
 	     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */	  \
 	     "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */	  \
 	     "call __switch_to\n\t"					  \
 	     ".globl thread_return\n"					  \
 	     "thread_return:\n\t"					  \
-	     "movq %%gs:%P[pda_pcurrent],%%rsi\n\t"			  \
+	     "movq "__percpu_arg([current_task])",%%rsi\n\t"		  \
+	     __switch_canary						  \
 	     "movq %P[thread_info](%%rsi),%%r8\n\t"			  \
-	     LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t"	  \
 	     "movq %%rax,%%rdi\n\t"					  \
-	     "jc ret_from_fork\n\t"					  \
+	     "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t"		  \
+	     "jnz ret_from_fork\n\t"					  \
 	     RESTORE_CONTEXT						  \
 	     : "=a" (last)						  \
+	       __switch_canary_oparam					  \
 	     : [next] "S" (next), [prev] "D" (prev),			  \
 	       [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
 	       [ti_flags] "i" (offsetof(struct thread_info, flags)),	  \
-	       [tif_fork] "i" (TIF_FORK),				  \
+	       [_tif_fork] "i" (_TIF_FORK),				  \
 	       [thread_info] "i" (offsetof(struct task_struct, stack)),  \
-	       [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent))  \
+	       [current_task] "m" (per_cpu_var(current_task))		  \
+	       __switch_canary_iparam					  \
 	     : "memory", "cc" __EXTRA_CLOBBER)
 #endif
 
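[Review note] The 64-bit hunk carries the same canary hand-off plus two
related cleanups: current is now fetched through the per-CPU
current_task variable instead of the PDA's pcurrent field, and the
locked "btr" that atomically tested and cleared TIF_FORK becomes a
plain "testl" against the _TIF_FORK mask (hence the operand rename);
presumably the flag is now cleared on the fork return path instead. A
C approximation of the rewritten tail after __switch_to returns --
illustrative only, with stubbed types, since this code runs on the new
task's stack and cannot have a protected frame of its own:

	struct thread_info_sketch { unsigned long flags; };
	struct task_struct_sketch {
		unsigned long stack_canary;
		struct thread_info_sketch *stack;   /* thread_info at stack base */
	};

	extern struct task_struct_sketch *read_current_task(void); /* percpu load */
	extern void write_gs40_canary(unsigned long);	/* irq_stack_union.stack_canary,
							 * the %gs:40 slot gcc reads on x86_64 */
	extern void ret_from_fork_sketch(void);
	#define _TIF_FORK_SKETCH (1UL << 18)	/* assumed bit position, illustrative */

	static void switch_to_tail_sketch(void)
	{
		struct task_struct_sketch *next = read_current_task();
		write_gs40_canary(next->stack_canary);	   /* __switch_canary */
		if (next->stack->flags & _TIF_FORK_SKETCH) /* testl; was lock btr + jc */
			ret_from_fork_sketch();		   /* first run of a new child */
	}
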
@@ -165,6 +204,25 @@ extern void native_load_gs_index(unsigned);
 #define savesegment(seg, value)				\
 	asm("mov %%" #seg ",%0":"=r" (value) : : "memory")
 
+/*
+ * x86_32 user gs accessors.
+ */
+#ifdef CONFIG_X86_32
+#ifdef CONFIG_X86_32_LAZY_GS
+#define get_user_gs(regs)	(u16)({unsigned long v; savesegment(gs, v); v;})
+#define set_user_gs(regs, v)	loadsegment(gs, (unsigned long)(v))
+#define task_user_gs(tsk)	((tsk)->thread.gs)
+#define lazy_save_gs(v)		savesegment(gs, (v))
+#define lazy_load_gs(v)		loadsegment(gs, (v))
+#else	/* X86_32_LAZY_GS */
+#define get_user_gs(regs)	(u16)((regs)->gs)
+#define set_user_gs(regs, v)	do { (regs)->gs = (v); } while (0)
+#define task_user_gs(tsk)	(task_pt_regs(tsk)->gs)
+#define lazy_save_gs(v)		do { } while (0)
+#define lazy_load_gs(v)		do { } while (0)
+#endif	/* X86_32_LAZY_GS */
+#endif	/* X86_32 */
+
 static inline unsigned long get_limit(unsigned long segment)
 {
 	unsigned long __limit;
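
[Review note] The new accessors give callers a single spelling for a
task's user gs in both configurations: with CONFIG_X86_32_LAZY_GS, gs
stays live in the register across kernel entry (so pt_regs carries no
useful copy and a sleeping task's value lives in thread.gs); without
it, gs is saved into pt_regs on entry. A hypothetical caller in the
style of the ptrace/regset code these look intended for (sketch, not
part of the patch):

	/* Sketch only: get_user_gs() is valid for current (lazy mode reads
	 * the live register and ignores regs); task_user_gs() fetches the
	 * saved value for any task. */
	static u16 sketch_get_gs(struct task_struct *tsk, struct pt_regs *regs)
	{
		if (tsk == current)
			return get_user_gs(regs);
		return task_user_gs(tsk);
	}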