Diffstat (limited to 'arch/x86/include/asm/system.h')

-rw-r--r--   arch/x86/include/asm/system.h | 67
1 file changed, 61 insertions(+), 6 deletions(-)
diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
index 8e626ea33a1a..c00bfdbdd456 100644
--- a/arch/x86/include/asm/system.h
+++ b/arch/x86/include/asm/system.h
@@ -23,6 +23,20 @@ struct task_struct *__switch_to(struct task_struct *prev,
 
 #ifdef CONFIG_X86_32
 
+#ifdef CONFIG_CC_STACKPROTECTOR
+#define __switch_canary						\
+	"movl %P[task_canary](%[next]), %%ebx\n\t"		\
+	"movl %%ebx, "__percpu_arg([stack_canary])"\n\t"
+#define __switch_canary_oparam					\
+	, [stack_canary] "=m" (per_cpu_var(stack_canary))
+#define __switch_canary_iparam					\
+	, [task_canary] "i" (offsetof(struct task_struct, stack_canary))
+#else	/* CC_STACKPROTECTOR */
+#define __switch_canary
+#define __switch_canary_oparam
+#define __switch_canary_iparam
+#endif	/* CC_STACKPROTECTOR */
+
 /*
  * Saving eflags is important. It switches not only IOPL between tasks,
  * it also protects other tasks from NT leaking through sysenter etc.
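Note: the three __switch_canary* macros added above split the canary handoff into an asm fragment plus its output and input operands, so the switch_to() asm below can splice them in, or compile them away, depending on CONFIG_CC_STACKPROTECTOR. In plain C the fragment amounts to the sketch below; percpu_write() is an assumption here (the per-cpu accessor of the same era), and %ebx is free as scratch because the switch_to() asm already declares "=b" (ebx) as an output.

        /* Illustrative sketch only -- not code from the patch.  What the
         * 32-bit __switch_canary asm does: publish the incoming task's
         * canary in the per-cpu slot that gcc's -fstack-protector
         * prologue/epilogue checks will read in next's context. */
        static inline void switch_canary_sketch(struct task_struct *next)
        {
                percpu_write(stack_canary, next->stack_canary);
        }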
@@ -44,6 +58,7 @@ do {								\
 		     "movl %[next_sp],%%esp\n\t"	/* restore ESP */	\
 		     "movl $1f,%[prev_ip]\n\t"		/* save EIP */		\
 		     "pushl %[next_ip]\n\t"		/* restore EIP */	\
+		     __switch_canary						\
 		     "jmp __switch_to\n"		/* regparm call */	\
 		     "1:\t"							\
 		     "popl %%ebp\n\t"			/* restore EBP */	\
@@ -58,6 +73,8 @@ do {								\
 		     "=b" (ebx), "=c" (ecx), "=d" (edx),		\
 		     "=S" (esi), "=D" (edi)				\
 									\
+		     __switch_canary_oparam				\
+									\
 		     /* input parameters: */				\
 		     : [next_sp]  "m" (next->thread.sp),		\
 		       [next_ip]  "m" (next->thread.ip),		\
@@ -66,6 +83,8 @@ do {								\
 		       [prev]     "a" (prev),				\
 		       [next]     "d" (next)				\
 									\
+		       __switch_canary_iparam				\
+									\
 		     : /* reloaded segment registers */			\
 			"memory");					\
 } while (0)
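Note the asymmetry in the operand macros: [stack_canary] is declared as an "=m" output because the asm writes that per-cpu memory and the compiler must not cache it, while [task_canary] is an "i" immediate because offsetof() is a compile-time constant, printed with the %P modifier so it appears as a bare displacement rather than $imm. The standalone demo below shows the same named-operand pattern; it is hypothetical illustration (struct and function names invented), not kernel code, but should compile with gcc on x86.

        #include <stddef.h>

        struct task { long pad; unsigned long stack_canary; };

        unsigned long read_canary(struct task *t)
        {
                unsigned long v;

                /* %P[off] prints the immediate without the '$' prefix,
                 * so it can be used as a memory displacement. */
                asm("mov %P[off](%[t]), %[v]"
                    : [v] "=r" (v)
                    : [t] "r" (t),
                      [off] "i" (offsetof(struct task, stack_canary)));
                return v;
        }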
@@ -86,27 +105,44 @@ do {								\
 	     , "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11",	\
 	       "r12", "r13", "r14", "r15"
 
+#ifdef CONFIG_CC_STACKPROTECTOR
+#define __switch_canary						\
+	"movq %P[task_canary](%%rsi),%%r8\n\t"			\
+	"movq %%r8,"__percpu_arg([gs_canary])"\n\t"
+#define __switch_canary_oparam					\
+	, [gs_canary] "=m" (per_cpu_var(irq_stack_union.stack_canary))
+#define __switch_canary_iparam					\
+	, [task_canary] "i" (offsetof(struct task_struct, stack_canary))
+#else	/* CC_STACKPROTECTOR */
+#define __switch_canary
+#define __switch_canary_oparam
+#define __switch_canary_iparam
+#endif	/* CC_STACKPROTECTOR */
+
 /* Save restore flags to clear handle leaking NT */
 #define switch_to(prev, next, last) \
 	asm volatile(SAVE_CONTEXT					  \
 	     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */	  \
 	     "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */	  \
 	     "call __switch_to\n\t"					  \
 	     ".globl thread_return\n"					  \
 	     "thread_return:\n\t"					  \
-	     "movq %%gs:%P[pda_pcurrent],%%rsi\n\t"			  \
+	     "movq "__percpu_arg([current_task])",%%rsi\n\t"		  \
+	     __switch_canary						  \
 	     "movq %P[thread_info](%%rsi),%%r8\n\t"			  \
-	     LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t"	  \
 	     "movq %%rax,%%rdi\n\t"					  \
-	     "jc ret_from_fork\n\t"					  \
+	     "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t"		  \
+	     "jnz   ret_from_fork\n\t"					  \
 	     RESTORE_CONTEXT						  \
 	     : "=a" (last)						  \
+	       __switch_canary_oparam					  \
 	     : [next] "S" (next), [prev] "D" (prev),			  \
 	       [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
 	       [ti_flags] "i" (offsetof(struct thread_info, flags)),	  \
-	       [tif_fork] "i" (TIF_FORK),				  \
+	       [_tif_fork] "i" (_TIF_FORK),				  \
 	       [thread_info] "i" (offsetof(struct task_struct, stack)),  \
-	       [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent))  \
+	       [current_task] "m" (per_cpu_var(current_task))		  \
+	       __switch_canary_iparam					  \
 	     : "memory", "cc" __EXTRA_CLOBBER)
 #endif
 
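Two things change on the 64-bit side. First, switch_to() now fetches current_task through the per-cpu accessor instead of the old x8664_pda field, and gains the same canary handoff; the 64-bit canary's per-cpu home is irq_stack_union.stack_canary because gcc's x86-64 stack protector hard-codes its read at %gs:40, so the slot must sit exactly 40 bytes from the per-cpu base. The layout sketch below is recalled from the companion processor.h change in the same series, not from this diff. Second, the LOCK_PREFIX btr that atomically tested and cleared TIF_FORK becomes a plain testl/jnz: the flag is now only read here, and clearing it is left to code outside this file.

        /* Sketch (assumed companion definition, not part of this diff):
         * overlaying the canary on the irq stack pins it at a fixed
         * offset from the per-cpu %gs base. */
        union irq_stack_union {
                char irq_stack[IRQ_STACK_SIZE];
                struct {
                        char gs_base[40];               /* fills %gs:0..39 */
                        unsigned long stack_canary;     /* lands at %gs:40 */
                };
        };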
@@ -165,6 +201,25 @@ extern void native_load_gs_index(unsigned);
 #define savesegment(seg, value)				\
 	asm("mov %%" #seg ",%0":"=r" (value) : : "memory")
 
+/*
+ * x86_32 user gs accessors.
+ */
+#ifdef CONFIG_X86_32
+#ifdef CONFIG_X86_32_LAZY_GS
+#define get_user_gs(regs)	(u16)({unsigned long v; savesegment(gs, v); v;})
+#define set_user_gs(regs, v)	loadsegment(gs, (unsigned long)(v))
+#define task_user_gs(tsk)	((tsk)->thread.gs)
+#define lazy_save_gs(v)		savesegment(gs, (v))
+#define lazy_load_gs(v)		loadsegment(gs, (v))
+#else	/* X86_32_LAZY_GS */
+#define get_user_gs(regs)	(u16)((regs)->gs)
+#define set_user_gs(regs, v)	do { (regs)->gs = (v); } while (0)
+#define task_user_gs(tsk)	(task_pt_regs(tsk)->gs)
+#define lazy_save_gs(v)		do { } while (0)
+#define lazy_load_gs(v)		do { } while (0)
+#endif	/* X86_32_LAZY_GS */
+#endif	/* X86_32 */
+
 static inline unsigned long get_limit(unsigned long segment)
 {
 	unsigned long __limit;
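These accessors give callers one spelling for "the user's %gs" regardless of configuration: under CONFIG_X86_32_LAZY_GS the live segment register is authoritative and is read/written directly, otherwise the value saved in pt_regs is. The sketch below shows the shape of a context-switch path using the lazy helpers; it is a hypothetical, condensed illustration (assuming a thread_struct with a gs field, as the macros above imply), not code from this patch.

        /* Hypothetical sketch: lazy_save_gs()/lazy_load_gs() compile to
         * real segment loads only when CONFIG_X86_32_LAZY_GS=y; in the
         * non-lazy configuration %gs is already saved/restored through
         * pt_regs on kernel entry/exit, so both calls vanish. */
        static void sketch_switch_user_gs(struct thread_struct *prev,
                                          struct thread_struct *next)
        {
                lazy_save_gs(prev->gs);         /* no-op when !LAZY_GS */
                /* ... stack, FPU, and TLS switching elided ... */
                lazy_load_gs(next->gs);         /* restore incoming gs */
        }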