diff options
author | Steven Rostedt <srostedt@redhat.com> | 2014-02-06 09:41:31 -0500 |
---|---|---|
committer | H. Peter Anvin <hpa@linux.intel.com> | 2014-03-06 19:56:55 -0500 |
commit | 198d208df4371734ac4728f69cb585c284d20a15 (patch) | |
tree | f5a3f120d02678987f35d272653bef6a6d02efe5 /arch/x86/kernel | |
parent | 0788aa6a23cb9d693fc5040ec774b979f1e906cd (diff) |
x86: Keep thread_info on thread stack in x86_32
x86_64 uses a per_cpu variable kernel_stack to always point to
the thread stack of current. This is where the thread_info is stored
and is accessed from this location even when the irq or exception stack
is in use. This removes the complexity of having to maintain the
thread info on the stack when interrupts are running and having to
copy the preempt_count and other fields to the interrupt stack.
x86_32 uses the old method of copying the thread_info from the thread
stack to the exception stack just before executing the exception.
Having the two different methods requires #ifdefs, and the x86_32 way
is also a bit of a pain to maintain. By converting x86_32 to the same
method as x86_64, we can remove the #ifdefs, clean up the x86_32 code
a little, and remove the overhead of the copy.
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Brian Gerst <brgerst@gmail.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Link: http://lkml.kernel.org/r/20110806012354.263834829@goodmis.org
Link: http://lkml.kernel.org/r/20140206144321.852942014@goodmis.org
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r-- | arch/x86/kernel/cpu/common.c | 8 | ||||
-rw-r--r-- | arch/x86/kernel/dumpstack_32.c | 41 | ||||
-rw-r--r-- | arch/x86/kernel/irq_32.c | 74 | ||||
-rw-r--r-- | arch/x86/kernel/process_32.c | 4 | ||||
-rw-r--r-- | arch/x86/kernel/ptrace.c | 2 | ||||
-rw-r--r-- | arch/x86/kernel/smpboot.c | 2 |
6 files changed, 75 insertions, 56 deletions
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 8e28bf2fc3ef..29c1944a98ac 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c | |||
@@ -1078,6 +1078,10 @@ static __init int setup_disablecpuid(char *arg) | |||
1078 | } | 1078 | } |
1079 | __setup("clearcpuid=", setup_disablecpuid); | 1079 | __setup("clearcpuid=", setup_disablecpuid); |
1080 | 1080 | ||
1081 | DEFINE_PER_CPU(unsigned long, kernel_stack) = | ||
1082 | (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE; | ||
1083 | EXPORT_PER_CPU_SYMBOL(kernel_stack); | ||
1084 | |||
1081 | #ifdef CONFIG_X86_64 | 1085 | #ifdef CONFIG_X86_64 |
1082 | struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table }; | 1086 | struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table }; |
1083 | struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1, | 1087 | struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1, |
@@ -1094,10 +1098,6 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned = | |||
1094 | &init_task; | 1098 | &init_task; |
1095 | EXPORT_PER_CPU_SYMBOL(current_task); | 1099 | EXPORT_PER_CPU_SYMBOL(current_task); |
1096 | 1100 | ||
1097 | DEFINE_PER_CPU(unsigned long, kernel_stack) = | ||
1098 | (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE; | ||
1099 | EXPORT_PER_CPU_SYMBOL(kernel_stack); | ||
1100 | |||
1101 | DEFINE_PER_CPU(char *, irq_stack_ptr) = | 1101 | DEFINE_PER_CPU(char *, irq_stack_ptr) = |
1102 | init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64; | 1102 | init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64; |
1103 | 1103 | ||
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c index 187d6a749c19..dca820b627d6 100644 --- a/arch/x86/kernel/dumpstack_32.c +++ b/arch/x86/kernel/dumpstack_32.c | |||
@@ -16,11 +16,33 @@ | |||
16 | 16 | ||
17 | #include <asm/stacktrace.h> | 17 | #include <asm/stacktrace.h> |
18 | 18 | ||
19 | static void *is_irq_stack(void *p, void *irq) | ||
20 | { | ||
21 | if (p < irq || p >= (irq + THREAD_SIZE)) | ||
22 | return NULL; | ||
23 | return irq + THREAD_SIZE; | ||
24 | } | ||
25 | |||
26 | |||
27 | static void *is_hardirq_stack(unsigned long *stack, int cpu) | ||
28 | { | ||
29 | void *irq = per_cpu(hardirq_stack, cpu); | ||
30 | |||
31 | return is_irq_stack(stack, irq); | ||
32 | } | ||
33 | |||
34 | static void *is_softirq_stack(unsigned long *stack, int cpu) | ||
35 | { | ||
36 | void *irq = per_cpu(softirq_stack, cpu); | ||
37 | |||
38 | return is_irq_stack(stack, irq); | ||
39 | } | ||
19 | 40 | ||
20 | void dump_trace(struct task_struct *task, struct pt_regs *regs, | 41 | void dump_trace(struct task_struct *task, struct pt_regs *regs, |
21 | unsigned long *stack, unsigned long bp, | 42 | unsigned long *stack, unsigned long bp, |
22 | const struct stacktrace_ops *ops, void *data) | 43 | const struct stacktrace_ops *ops, void *data) |
23 | { | 44 | { |
45 | const unsigned cpu = get_cpu(); | ||
24 | int graph = 0; | 46 | int graph = 0; |
25 | u32 *prev_esp; | 47 | u32 *prev_esp; |
26 | 48 | ||
@@ -40,18 +62,22 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, | |||
40 | 62 | ||
41 | for (;;) { | 63 | for (;;) { |
42 | struct thread_info *context; | 64 | struct thread_info *context; |
65 | void *end_stack; | ||
66 | |||
67 | end_stack = is_hardirq_stack(stack, cpu); | ||
68 | if (!end_stack) | ||
69 | end_stack = is_softirq_stack(stack, cpu); | ||
43 | 70 | ||
44 | context = (struct thread_info *) | 71 | context = task_thread_info(task); |
45 | ((unsigned long)stack & (~(THREAD_SIZE - 1))); | 72 | bp = ops->walk_stack(context, stack, bp, ops, data, |
46 | bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph); | 73 | end_stack, &graph); |
47 | 74 | ||
48 | /* Stop if not on irq stack */ | 75 | /* Stop if not on irq stack */ |
49 | if (task_stack_page(task) == context) | 76 | if (!end_stack) |
50 | break; | 77 | break; |
51 | 78 | ||
52 | /* The previous esp is just above the context */ | 79 | /* The previous esp is saved on the bottom of the stack */ |
53 | prev_esp = (u32 *) ((char *)context + sizeof(struct thread_info) - | 80 | prev_esp = (u32 *)(end_stack - THREAD_SIZE); |
54 | sizeof(long)); | ||
55 | stack = (unsigned long *)*prev_esp; | 81 | stack = (unsigned long *)*prev_esp; |
56 | if (!stack) | 82 | if (!stack) |
57 | break; | 83 | break; |
@@ -60,6 +86,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, | |||
60 | break; | 86 | break; |
61 | touch_nmi_watchdog(); | 87 | touch_nmi_watchdog(); |
62 | } | 88 | } |
89 | put_cpu(); | ||
63 | } | 90 | } |
64 | EXPORT_SYMBOL(dump_trace); | 91 | EXPORT_SYMBOL(dump_trace); |
65 | 92 | ||
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index f135cc2ff301..988dc8bcaebf 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c | |||
@@ -55,16 +55,8 @@ static inline int check_stack_overflow(void) { return 0; } | |||
55 | static inline void print_stack_overflow(void) { } | 55 | static inline void print_stack_overflow(void) { } |
56 | #endif | 56 | #endif |
57 | 57 | ||
58 | /* | 58 | DEFINE_PER_CPU(struct irq_stack *, hardirq_stack); |
59 | * per-CPU IRQ handling contexts (thread information and stack) | 59 | DEFINE_PER_CPU(struct irq_stack *, softirq_stack); |
60 | */ | ||
61 | union irq_ctx { | ||
62 | struct thread_info tinfo; | ||
63 | u32 stack[THREAD_SIZE/sizeof(u32)]; | ||
64 | } __attribute__((aligned(THREAD_SIZE))); | ||
65 | |||
66 | static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx); | ||
67 | static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx); | ||
68 | 60 | ||
69 | static void call_on_stack(void *func, void *stack) | 61 | static void call_on_stack(void *func, void *stack) |
70 | { | 62 | { |
@@ -77,14 +69,22 @@ static void call_on_stack(void *func, void *stack) | |||
77 | : "memory", "cc", "edx", "ecx", "eax"); | 69 | : "memory", "cc", "edx", "ecx", "eax"); |
78 | } | 70 | } |
79 | 71 | ||
72 | /* how to get the current stack pointer from C */ | ||
73 | register unsigned long current_stack_pointer asm("esp") __used; | ||
74 | |||
75 | static inline void *current_stack(void) | ||
76 | { | ||
77 | return (void *)(current_stack_pointer & ~(THREAD_SIZE - 1)); | ||
78 | } | ||
79 | |||
80 | static inline int | 80 | static inline int |
81 | execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) | 81 | execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) |
82 | { | 82 | { |
83 | union irq_ctx *curctx, *irqctx; | 83 | struct irq_stack *curstk, *irqstk; |
84 | u32 *isp, *prev_esp, arg1, arg2; | 84 | u32 *isp, *prev_esp, arg1, arg2; |
85 | 85 | ||
86 | curctx = (union irq_ctx *) current_thread_info(); | 86 | curstk = (struct irq_stack *) current_stack(); |
87 | irqctx = __this_cpu_read(hardirq_ctx); | 87 | irqstk = __this_cpu_read(hardirq_stack); |
88 | 88 | ||
89 | /* | 89 | /* |
90 | * this is where we switch to the IRQ stack. However, if we are | 90 | * this is where we switch to the IRQ stack. However, if we are |
@@ -92,15 +92,13 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) | |||
92 | * handler) we can't do that and just have to keep using the | 92 | * handler) we can't do that and just have to keep using the |
93 | * current stack (which is the irq stack already after all) | 93 | * current stack (which is the irq stack already after all) |
94 | */ | 94 | */ |
95 | if (unlikely(curctx == irqctx)) | 95 | if (unlikely(curstk == irqstk)) |
96 | return 0; | 96 | return 0; |
97 | 97 | ||
98 | /* build the stack frame on the IRQ stack */ | 98 | isp = (u32 *) ((char *)irqstk + sizeof(*irqstk)); |
99 | isp = (u32 *) ((char *)irqctx + sizeof(*irqctx)); | 99 | |
100 | irqctx->tinfo.task = curctx->tinfo.task; | 100 | /* Save the next esp at the bottom of the stack */ |
101 | /* Save the next esp after thread_info */ | 101 | prev_esp = (u32 *)irqstk; |
102 | prev_esp = (u32 *) ((char *)irqctx + sizeof(struct thread_info) - | ||
103 | sizeof(long)); | ||
104 | *prev_esp = current_stack_pointer; | 102 | *prev_esp = current_stack_pointer; |
105 | 103 | ||
106 | if (unlikely(overflow)) | 104 | if (unlikely(overflow)) |
@@ -121,49 +119,39 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) | |||
121 | */ | 119 | */ |
122 | void irq_ctx_init(int cpu) | 120 | void irq_ctx_init(int cpu) |
123 | { | 121 | { |
124 | union irq_ctx *irqctx; | 122 | struct irq_stack *irqstk; |
125 | 123 | ||
126 | if (per_cpu(hardirq_ctx, cpu)) | 124 | if (per_cpu(hardirq_stack, cpu)) |
127 | return; | 125 | return; |
128 | 126 | ||
129 | irqctx = page_address(alloc_pages_node(cpu_to_node(cpu), | 127 | irqstk = page_address(alloc_pages_node(cpu_to_node(cpu), |
130 | THREADINFO_GFP, | 128 | THREADINFO_GFP, |
131 | THREAD_SIZE_ORDER)); | 129 | THREAD_SIZE_ORDER)); |
132 | memset(&irqctx->tinfo, 0, sizeof(struct thread_info)); | 130 | per_cpu(hardirq_stack, cpu) = irqstk; |
133 | irqctx->tinfo.cpu = cpu; | ||
134 | irqctx->tinfo.addr_limit = MAKE_MM_SEG(0); | ||
135 | 131 | ||
136 | per_cpu(hardirq_ctx, cpu) = irqctx; | 132 | irqstk = page_address(alloc_pages_node(cpu_to_node(cpu), |
137 | |||
138 | irqctx = page_address(alloc_pages_node(cpu_to_node(cpu), | ||
139 | THREADINFO_GFP, | 133 | THREADINFO_GFP, |
140 | THREAD_SIZE_ORDER)); | 134 | THREAD_SIZE_ORDER)); |
141 | memset(&irqctx->tinfo, 0, sizeof(struct thread_info)); | 135 | per_cpu(softirq_stack, cpu) = irqstk; |
142 | irqctx->tinfo.cpu = cpu; | ||
143 | irqctx->tinfo.addr_limit = MAKE_MM_SEG(0); | ||
144 | |||
145 | per_cpu(softirq_ctx, cpu) = irqctx; | ||
146 | 136 | ||
147 | printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n", | 137 | printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n", |
148 | cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu)); | 138 | cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu)); |
149 | } | 139 | } |
150 | 140 | ||
151 | void do_softirq_own_stack(void) | 141 | void do_softirq_own_stack(void) |
152 | { | 142 | { |
153 | struct thread_info *curctx; | 143 | struct thread_info *curstk; |
154 | union irq_ctx *irqctx; | 144 | struct irq_stack *irqstk; |
155 | u32 *isp, *prev_esp; | 145 | u32 *isp, *prev_esp; |
156 | 146 | ||
157 | curctx = current_thread_info(); | 147 | curstk = current_stack(); |
158 | irqctx = __this_cpu_read(softirq_ctx); | 148 | irqstk = __this_cpu_read(softirq_stack); |
159 | irqctx->tinfo.task = curctx->task; | ||
160 | 149 | ||
161 | /* build the stack frame on the softirq stack */ | 150 | /* build the stack frame on the softirq stack */ |
162 | isp = (u32 *) ((char *)irqctx + sizeof(*irqctx)); | 151 | isp = (u32 *) ((char *)irqstk + sizeof(*irqstk)); |
163 | 152 | ||
164 | /* Push the previous esp onto the stack */ | 153 | /* Push the previous esp onto the stack */ |
165 | prev_esp = (u32 *) ((char *)irqctx + sizeof(struct thread_info) - | 154 | prev_esp = (u32 *)irqstk; |
166 | sizeof(long)); | ||
167 | *prev_esp = current_stack_pointer; | 155 | *prev_esp = current_stack_pointer; |
168 | 156 | ||
169 | call_on_stack(__do_softirq, isp); | 157 | call_on_stack(__do_softirq, isp); |
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 0de43e98ce08..7bc86bbe7485 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c | |||
@@ -314,6 +314,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
314 | */ | 314 | */ |
315 | arch_end_context_switch(next_p); | 315 | arch_end_context_switch(next_p); |
316 | 316 | ||
317 | this_cpu_write(kernel_stack, | ||
318 | (unsigned long)task_stack_page(next_p) + | ||
319 | THREAD_SIZE - KERNEL_STACK_OFFSET); | ||
320 | |||
317 | /* | 321 | /* |
318 | * Restore %gs if needed (which is common) | 322 | * Restore %gs if needed (which is common) |
319 | */ | 323 | */ |
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index f352a7cc43a1..678c0ada3b3c 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c | |||
@@ -189,7 +189,7 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs) | |||
189 | if (context == (sp & ~(THREAD_SIZE - 1))) | 189 | if (context == (sp & ~(THREAD_SIZE - 1))) |
190 | return sp; | 190 | return sp; |
191 | 191 | ||
192 | prev_esp = (u32 *)(context + sizeof(struct thread_info) - sizeof(long)); | 192 | prev_esp = (u32 *)(context); |
193 | if (prev_esp) | 193 | if (prev_esp) |
194 | return (unsigned long)prev_esp; | 194 | return (unsigned long)prev_esp; |
195 | 195 | ||
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index a32da804252e..867d53ea88a3 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -758,10 +758,10 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle) | |||
758 | #else | 758 | #else |
759 | clear_tsk_thread_flag(idle, TIF_FORK); | 759 | clear_tsk_thread_flag(idle, TIF_FORK); |
760 | initial_gs = per_cpu_offset(cpu); | 760 | initial_gs = per_cpu_offset(cpu); |
761 | #endif | ||
761 | per_cpu(kernel_stack, cpu) = | 762 | per_cpu(kernel_stack, cpu) = |
762 | (unsigned long)task_stack_page(idle) - | 763 | (unsigned long)task_stack_page(idle) - |
763 | KERNEL_STACK_OFFSET + THREAD_SIZE; | 764 | KERNEL_STACK_OFFSET + THREAD_SIZE; |
764 | #endif | ||
765 | early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu); | 765 | early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu); |
766 | initial_code = (unsigned long)start_secondary; | 766 | initial_code = (unsigned long)start_secondary; |
767 | stack_start = idle->thread.sp; | 767 | stack_start = idle->thread.sp; |