path: root/arch/x86/kernel/cpu/common.c
author		Tejun Heo <tj@kernel.org>	2009-02-09 08:17:40 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-02-09 18:42:01 -0500
commit		60a5317ff0f42dd313094b88f809f63041568b08 (patch)
tree		307dfd9715fbc9ff83c3c3ae3b0e8f03888083f2 /arch/x86/kernel/cpu/common.c
parent		ccbeed3a05908d201b47b6c3dd1a373138bba566 (diff)
x86: implement x86_32 stack protector
Impact: stack protector for x86_32

Implement stack protector for x86_32.  GDT entry 28 is used for it.
It is set to point to stack_canary-20 and has a length of 24 bytes.
CONFIG_CC_STACKPROTECTOR turns off CONFIG_X86_32_LAZY_GS and sets %gs
to the stack canary segment on entry.  As %gs is otherwise unused by
the kernel, the canary can live anywhere; it is defined as a percpu
variable.

x86_32 exception handlers take the register frame on the stack directly
as struct pt_regs.  With -fstack-protector turned on, gcc copies the
whole structure to a location after the stack canary and (of course)
does not copy it back on return, thus losing all changes.  For now,
-fno-stack-protector is added to all files which contain those
functions.  We definitely need something better.

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
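For context, here is a minimal sketch of how the pieces described above fit together on x86_32. Only the stack_canary percpu variable, GDT entry 28, and the stack_canary-20 base with a 24-byte limit come from this patch; the helper name below and the exact descriptor manipulation are assumptions (the real setup lives in <asm/stackprotector.h>, which is not part of this file's diff), and GDT_ENTRY_STACK_CANARY is presumed to be defined alongside GDT_STACK_CANARY_INIT in <asm/segment.h>:

    #include <linux/percpu.h>
    #include <asm/desc.h>
    #include <asm/segment.h>

    /* The canary itself is just a percpu variable; %gs only gives gcc a
     * fixed way to reach it, at address %gs:20. */
    DEFINE_PER_CPU(unsigned long, stack_canary);

    /* Hypothetical helper: point GDT slot 28 (GDT_ENTRY_STACK_CANARY) at
     * &stack_canary - 20 so that %gs:20 resolves to this CPU's canary. */
    static void setup_stack_canary_segment_sketch(int cpu)
    {
    	unsigned long base = (unsigned long)&per_cpu(stack_canary, cpu) - 20;
    	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
    	struct desc_struct desc = gdt[GDT_ENTRY_STACK_CANARY];

    	/* patch only the base; the 24-byte limit comes from the static init */
    	desc.base0 = base & 0xffff;
    	desc.base1 = (base >> 16) & 0xff;
    	desc.base2 = (base >> 24) & 0xff;
    	write_gdt_entry(gdt, GDT_ENTRY_STACK_CANARY, &desc, DESCTYPE_S);
    }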
Diffstat (limited to 'arch/x86/kernel/cpu/common.c')
-rw-r--r--	arch/x86/kernel/cpu/common.c	17
1 file changed, 11 insertions(+), 6 deletions(-)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 41b0de6df873..260fe4cb2c82 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -39,6 +39,7 @@
 #include <asm/sections.h>
 #include <asm/setup.h>
 #include <asm/hypervisor.h>
+#include <asm/stackprotector.h>
 
 #include "cpu.h"
 
@@ -122,6 +123,7 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
 
 	[GDT_ENTRY_ESPFIX_SS]		= { { { 0x00000000, 0x00c09200 } } },
 	[GDT_ENTRY_PERCPU]		= { { { 0x0000ffff, 0x00cf9200 } } },
+	GDT_STACK_CANARY_INIT
 #endif
 } };
 EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
@@ -261,6 +263,7 @@ void load_percpu_segment(int cpu)
 	loadsegment(gs, 0);
 	wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
 #endif
+	load_stack_canary_segment();
 }
 
 /* Current gdt points %fs at the "master" per-cpu area: after this,
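The load_stack_canary_segment() call added above is what (re)loads %gs with the canary segment whenever the per-cpu segment registers are switched. A plausible sketch of that helper, assuming the __KERNEL_STACK_CANARY selector from the companion segment.h change (the helper itself is defined in <asm/stackprotector.h> and is not shown in this diff):

    /* Sketch only: reload %gs so it selects the stack-canary segment on
     * 32-bit; on 64-bit the canary is reached via MSR_GS_BASE instead. */
    static inline void load_stack_canary_segment(void)
    {
    #ifdef CONFIG_X86_32
    	asm("mov %0, %%gs" : : "r" (__KERNEL_STACK_CANARY) : "memory");
    #endif
    }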
@@ -946,16 +949,21 @@ unsigned long kernel_eflags;
  */
 DEFINE_PER_CPU(struct orig_ist, orig_ist);
 
-#else
+#else	/* x86_64 */
+
+#ifdef CONFIG_CC_STACKPROTECTOR
+DEFINE_PER_CPU(unsigned long, stack_canary);
+#endif
 
-/* Make sure %fs is initialized properly in idle threads */
+/* Make sure %fs and %gs are initialized properly in idle threads */
 struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
 {
 	memset(regs, 0, sizeof(struct pt_regs));
 	regs->fs = __KERNEL_PERCPU;
+	regs->gs = __KERNEL_STACK_CANARY;
 	return regs;
 }
-#endif
+#endif	/* x86_64 */
 
 /*
  * cpu_init() initializes state that is per-CPU. Some data is already
@@ -1120,9 +1128,6 @@ void __cpuinit cpu_init(void)
 	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
 #endif
 
-	/* Clear %gs. */
-	asm volatile ("mov %0, %%gs" : : "r" (0));
-
 	/* Clear all 6 debug registers: */
 	set_debugreg(0, 0);
 	set_debugreg(0, 1);
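The hunk above drops the unconditional zeroing of %gs in cpu_init(): with this patch %gs carries the stack-canary segment on 32-bit, so clearing it would break every later canary access. As a rough illustration (not code from this patch), the check that -fstack-protector-compiled functions perform on i386 boils down to reading the fixed address %gs:20, which is exactly what the stack_canary-20 segment base arranges to hit:

    /* Hypothetical illustration of the canary access gcc emits on i386. */
    static inline unsigned long current_stack_canary(void)
    {
    	unsigned long canary;

    	/* equivalent of the "movl %gs:20, %reg" in every protected prologue */
    	asm("mov %%gs:20, %0" : "=r" (canary));
    	return canary;
    }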