author		Linus Torvalds <torvalds@linux-foundation.org>	2014-04-01 13:17:18 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-04-01 13:17:18 -0400
commit		99f7b025bfadd7fac5216dcfb2a08312804674c0 (patch)
tree		b5318d3acc933eb21676e48e3bf7b428295347c2 /arch/x86/kernel
parent		a21e40877ad130de837b0394583e4f68dc2ab6c5 (diff)
parent		6cce16f99d7be23cec7cabdf32a8166eec6e5393 (diff)
Merge branch 'x86-threadinfo-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 threadinfo changes from Ingo Molnar:
"The main change here is the consolidation/unification of 32 and 64 bit
thread_info handling methods, from Steve Rostedt"
* 'x86-threadinfo-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86, threadinfo: Redo "x86: Use inline assembler to get sp"
  x86: Clean up dumpstack_64.c code
  x86: Keep thread_info on thread stack in x86_32
  x86: Prepare removal of previous_esp from i386 thread_info structure
  x86: Nuke GET_THREAD_INFO_WITH_ESP() macro for i386
  x86: Nuke the supervisor_stack field in i386 thread_info
Diffstat (limited to 'arch/x86/kernel')

 -rw-r--r--  arch/x86/kernel/cpu/common.c   |   8
 -rw-r--r--  arch/x86/kernel/dumpstack_32.c |  44
 -rw-r--r--  arch/x86/kernel/dumpstack_64.c | 117
 -rw-r--r--  arch/x86/kernel/irq_32.c       |  83
 -rw-r--r--  arch/x86/kernel/process_32.c   |   4
 -rw-r--r--  arch/x86/kernel/ptrace.c       |   8
 -rw-r--r--  arch/x86/kernel/smpboot.c      |   2

 7 files changed, 177 insertions(+), 89 deletions(-)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index cca53d88762a..a135239badb7 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1079,6 +1079,10 @@ static __init int setup_disablecpuid(char *arg)
 }
 __setup("clearcpuid=", setup_disablecpuid);
 
+DEFINE_PER_CPU(unsigned long, kernel_stack) =
+	(unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
+EXPORT_PER_CPU_SYMBOL(kernel_stack);
+
 #ifdef CONFIG_X86_64
 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
 struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
@@ -1095,10 +1099,6 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
 	&init_task;
 EXPORT_PER_CPU_SYMBOL(current_task);
 
-DEFINE_PER_CPU(unsigned long, kernel_stack) =
-	(unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
-EXPORT_PER_CPU_SYMBOL(kernel_stack);
-
 DEFINE_PER_CPU(char *, irq_stack_ptr) =
 	init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;
 
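The hunks above move the kernel_stack per-CPU variable out of the
CONFIG_X86_64-only block so that 32-bit kernels define it as well. As a rough
illustration of what the initializer computes, here is a user-space sketch of
the same arithmetic; the THREAD_SIZE and KERNEL_STACK_OFFSET values are
illustrative stand-ins, not taken from this diff:

	#include <stdio.h>
	#include <stdint.h>

	/* Illustrative stand-ins for the kernel constants. */
	#define THREAD_SIZE         (2 * 4096UL)  /* 8K stacks */
	#define KERNEL_STACK_OFFSET (5 * 8UL)     /* slack below the top */

	/* Stand-in for init_thread_union, the boot task's stack region. */
	static unsigned char init_thread_union[THREAD_SIZE]
		__attribute__((aligned(4096)));

	int main(void)
	{
		/* Same shape as the DEFINE_PER_CPU initializer: start at
		 * the base, go to the top (+THREAD_SIZE), then back off
		 * KERNEL_STACK_OFFSET bytes. */
		uintptr_t kernel_stack = (uintptr_t)&init_thread_union
					 - KERNEL_STACK_OFFSET + THREAD_SIZE;

		printf("stack base:   %p\n", (void *)init_thread_union);
		printf("kernel_stack: %#lx\n", (unsigned long)kernel_stack);
		return 0;
	}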
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
index a21d49c071db..5abd4cd4230c 100644
--- a/arch/x86/kernel/dumpstack_32.c
+++ b/arch/x86/kernel/dumpstack_32.c
@@ -16,12 +16,35 @@
 
 #include <asm/stacktrace.h>
 
+static void *is_irq_stack(void *p, void *irq)
+{
+	if (p < irq || p >= (irq + THREAD_SIZE))
+		return NULL;
+	return irq + THREAD_SIZE;
+}
+
+
+static void *is_hardirq_stack(unsigned long *stack, int cpu)
+{
+	void *irq = per_cpu(hardirq_stack, cpu);
+
+	return is_irq_stack(stack, irq);
+}
+
+static void *is_softirq_stack(unsigned long *stack, int cpu)
+{
+	void *irq = per_cpu(softirq_stack, cpu);
+
+	return is_irq_stack(stack, irq);
+}
 
 void dump_trace(struct task_struct *task, struct pt_regs *regs,
 		unsigned long *stack, unsigned long bp,
 		const struct stacktrace_ops *ops, void *data)
 {
+	const unsigned cpu = get_cpu();
 	int graph = 0;
+	u32 *prev_esp;
 
 	if (!task)
 		task = current;
@@ -39,18 +62,31 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 
 	for (;;) {
 		struct thread_info *context;
+		void *end_stack;
+
+		end_stack = is_hardirq_stack(stack, cpu);
+		if (!end_stack)
+			end_stack = is_softirq_stack(stack, cpu);
 
-		context = (struct thread_info *)
-			((unsigned long)stack & (~(THREAD_SIZE - 1)));
-		bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
+		context = task_thread_info(task);
+		bp = ops->walk_stack(context, stack, bp, ops, data,
+				     end_stack, &graph);
 
-		stack = (unsigned long *)context->previous_esp;
+		/* Stop if not on irq stack */
+		if (!end_stack)
+			break;
+
+		/* The previous esp is saved on the bottom of the stack */
+		prev_esp = (u32 *)(end_stack - THREAD_SIZE);
+		stack = (unsigned long *)*prev_esp;
 		if (!stack)
			break;
+
 		if (ops->stack(data, "IRQ") < 0)
 			break;
 		touch_nmi_watchdog();
 	}
+	put_cpu();
 }
 EXPORT_SYMBOL(dump_trace);
 
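The rewritten dump_trace() loop above walks until the frame is no longer on an
irq stack, then hops to the interrupted stack through a pointer that the irq
entry path stores in the first word of the irq-stack page. A self-contained
user-space sketch of that convention follows; the stack size and all names are
illustrative, and it models a 32-bit kernel, so the saved value is a u32:

	#include <stdio.h>
	#include <stdint.h>

	#define THREAD_SIZE (2 * 4096UL)  /* illustrative */

	/* Mirror of the new is_irq_stack() helper: NULL when p lies outside
	 * the THREAD_SIZE region starting at irq, otherwise one past the end
	 * of that region, which the caller uses as the walk limit. */
	static void *is_irq_stack(void *p, void *irq)
	{
		if (p < irq || p >= (void *)((char *)irq + THREAD_SIZE))
			return NULL;
		return (char *)irq + THREAD_SIZE;
	}

	int main(void)
	{
		static unsigned char irq_stack[THREAD_SIZE];
		static unsigned char task_stack[THREAD_SIZE];
		void *sp = irq_stack + 256;  /* pointer inside the irq stack */
		void *end_stack;
		uint32_t *prev_esp;

		/* the entry path saved the interrupted esp in the first word */
		*(uint32_t *)irq_stack = (uint32_t)(uintptr_t)(task_stack + 512);

		end_stack = is_irq_stack(sp, irq_stack);
		if (end_stack) {
			/* previous esp sits at the bottom of the stack page */
			prev_esp = (uint32_t *)((char *)end_stack - THREAD_SIZE);
			printf("continue walking at %#x\n", *prev_esp);
		}
		return 0;
	}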
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index addb207dab92..346b1df2412e 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -104,6 +104,45 @@ in_irq_stack(unsigned long *stack, unsigned long *irq_stack,
 	return (stack >= irq_stack && stack < irq_stack_end);
 }
 
+static const unsigned long irq_stack_size =
+	(IRQ_STACK_SIZE - 64) / sizeof(unsigned long);
+
+enum stack_type {
+	STACK_IS_UNKNOWN,
+	STACK_IS_NORMAL,
+	STACK_IS_EXCEPTION,
+	STACK_IS_IRQ,
+};
+
+static enum stack_type
+analyze_stack(int cpu, struct task_struct *task,
+	      unsigned long *stack, unsigned long **stack_end, char **id)
+{
+	unsigned long *irq_stack;
+	unsigned long addr;
+	unsigned used = 0;
+
+	addr = ((unsigned long)stack & (~(THREAD_SIZE - 1)));
+	if ((unsigned long)task_stack_page(task) == addr)
+		return STACK_IS_NORMAL;
+
+	*stack_end = in_exception_stack(cpu, (unsigned long)stack,
+					&used, id);
+	if (*stack_end)
+		return STACK_IS_EXCEPTION;
+
+	*stack_end = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
+	if (!*stack_end)
+		return STACK_IS_UNKNOWN;
+
+	irq_stack = *stack_end - irq_stack_size;
+
+	if (in_irq_stack(stack, irq_stack, *stack_end))
+		return STACK_IS_IRQ;
+
+	return STACK_IS_UNKNOWN;
+}
+
 /*
  * x86-64 can have up to three kernel stacks:
  * process stack
@@ -116,12 +155,11 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 		const struct stacktrace_ops *ops, void *data)
 {
 	const unsigned cpu = get_cpu();
-	unsigned long *irq_stack_end =
-		(unsigned long *)per_cpu(irq_stack_ptr, cpu);
-	unsigned used = 0;
 	struct thread_info *tinfo;
-	int graph = 0;
+	unsigned long *irq_stack;
 	unsigned long dummy;
+	int graph = 0;
+	int done = 0;
 
 	if (!task)
 		task = current;
@@ -143,49 +181,60 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 	 * exceptions
 	 */
 	tinfo = task_thread_info(task);
-	for (;;) {
+	while (!done) {
+		unsigned long *stack_end;
+		enum stack_type stype;
 		char *id;
-		unsigned long *estack_end;
-		estack_end = in_exception_stack(cpu, (unsigned long)stack,
-						&used, &id);
 
-		if (estack_end) {
+		stype = analyze_stack(cpu, task, stack, &stack_end, &id);
+
+		/* Default finish unless specified to continue */
+		done = 1;
+
+		switch (stype) {
+
+		/* Break out early if we are on the thread stack */
+		case STACK_IS_NORMAL:
+			break;
+
+		case STACK_IS_EXCEPTION:
+
 			if (ops->stack(data, id) < 0)
 				break;
 
 			bp = ops->walk_stack(tinfo, stack, bp, ops,
-					     data, estack_end, &graph);
+					     data, stack_end, &graph);
 			ops->stack(data, "<EOE>");
 			/*
 			 * We link to the next stack via the
 			 * second-to-last pointer (index -2 to end) in the
 			 * exception stack:
 			 */
-			stack = (unsigned long *) estack_end[-2];
-			continue;
-		}
-		if (irq_stack_end) {
-			unsigned long *irq_stack;
-			irq_stack = irq_stack_end -
-				(IRQ_STACK_SIZE - 64) / sizeof(*irq_stack);
+			stack = (unsigned long *) stack_end[-2];
+			done = 0;
+			break;
 
-			if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
-				if (ops->stack(data, "IRQ") < 0)
-					break;
-				bp = ops->walk_stack(tinfo, stack, bp,
-					ops, data, irq_stack_end, &graph);
-				/*
-				 * We link to the next stack (which would be
-				 * the process stack normally) the last
-				 * pointer (index -1 to end) in the IRQ stack:
-				 */
-				stack = (unsigned long *) (irq_stack_end[-1]);
-				irq_stack_end = NULL;
-				ops->stack(data, "EOI");
-				continue;
-			}
+		case STACK_IS_IRQ:
+
+			if (ops->stack(data, "IRQ") < 0)
+				break;
+			bp = ops->walk_stack(tinfo, stack, bp,
+				     ops, data, stack_end, &graph);
+			/*
+			 * We link to the next stack (which would be
+			 * the process stack normally) the last
+			 * pointer (index -1 to end) in the IRQ stack:
+			 */
+			stack = (unsigned long *) (stack_end[-1]);
+			irq_stack = stack_end - irq_stack_size;
+			ops->stack(data, "EOI");
+			done = 0;
+			break;
+
+		case STACK_IS_UNKNOWN:
+			ops->stack(data, "UNK");
+			break;
 		}
-		break;
 	}
 
 	/*
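The 64-bit rework above factors the "which stack is this address on?" test
into analyze_stack(), which classifies into an enum and lets dump_trace()
become a switch. A simplified, self-contained model of that classifier
follows; the region bounds are hypothetical stand-ins for the per-CPU data
the real function consults:

	#include <stdint.h>
	#include <stdio.h>

	enum stack_type {
		STACK_IS_UNKNOWN,
		STACK_IS_NORMAL,
		STACK_IS_EXCEPTION,
		STACK_IS_IRQ,
	};

	struct regions {
		uintptr_t task_lo, task_hi;	/* process stack   */
		uintptr_t exc_lo, exc_hi;	/* exception stack */
		uintptr_t irq_lo, irq_hi;	/* IRQ stack       */
	};

	static enum stack_type classify(const struct regions *r, uintptr_t sp)
	{
		if (sp >= r->task_lo && sp < r->task_hi)
			return STACK_IS_NORMAL;
		if (sp >= r->exc_lo && sp < r->exc_hi)
			return STACK_IS_EXCEPTION;
		if (sp >= r->irq_lo && sp < r->irq_hi)
			return STACK_IS_IRQ;
		return STACK_IS_UNKNOWN;
	}

	int main(void)
	{
		static const char *name[] = { "UNK", "NORMAL", "EXCEPTION", "IRQ" };
		struct regions r = {
			.task_lo = 0x10000, .task_hi = 0x14000,
			.exc_lo  = 0x20000, .exc_hi  = 0x21000,
			.irq_lo  = 0x30000, .irq_hi  = 0x34000,
		};

		printf("%s\n", name[classify(&r, 0x10abc)]);  /* NORMAL */
		printf("%s\n", name[classify(&r, 0x33ff0)]);  /* IRQ */
		printf("%s\n", name[classify(&r, 0x99999)]);  /* UNK */
		return 0;
	}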
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index d7fcbedc9c43..63ce838e5a54 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -55,16 +55,8 @@ static inline int check_stack_overflow(void) { return 0; }
 static inline void print_stack_overflow(void) { }
 #endif
 
-/*
- * per-CPU IRQ handling contexts (thread information and stack)
- */
-union irq_ctx {
-	struct thread_info tinfo;
-	u32 stack[THREAD_SIZE/sizeof(u32)];
-} __attribute__((aligned(THREAD_SIZE)));
-
-static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
-static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
+DEFINE_PER_CPU(struct irq_stack *, hardirq_stack);
+DEFINE_PER_CPU(struct irq_stack *, softirq_stack);
 
 static void call_on_stack(void *func, void *stack)
 {
@@ -77,14 +69,26 @@ static void call_on_stack(void *func, void *stack)
 		     : "memory", "cc", "edx", "ecx", "eax");
 }
 
+/* how to get the current stack pointer from C */
+#define current_stack_pointer ({		\
+	unsigned long sp;			\
+	asm("mov %%esp,%0" : "=g" (sp));	\
+	sp;					\
+})
+
+static inline void *current_stack(void)
+{
+	return (void *)(current_stack_pointer & ~(THREAD_SIZE - 1));
+}
+
 static inline int
 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
 {
-	union irq_ctx *curctx, *irqctx;
-	u32 *isp, arg1, arg2;
+	struct irq_stack *curstk, *irqstk;
+	u32 *isp, *prev_esp, arg1, arg2;
 
-	curctx = (union irq_ctx *) current_thread_info();
-	irqctx = __this_cpu_read(hardirq_ctx);
+	curstk = (struct irq_stack *) current_stack();
+	irqstk = __this_cpu_read(hardirq_stack);
 
 	/*
 	 * this is where we switch to the IRQ stack. However, if we are
@@ -92,13 +96,14 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
 	 * handler) we can't do that and just have to keep using the
 	 * current stack (which is the irq stack already after all)
 	 */
-	if (unlikely(curctx == irqctx))
+	if (unlikely(curstk == irqstk))
 		return 0;
 
-	/* build the stack frame on the IRQ stack */
-	isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
-	irqctx->tinfo.task = curctx->tinfo.task;
-	irqctx->tinfo.previous_esp = current_stack_pointer;
+	isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));
+
+	/* Save the next esp at the bottom of the stack */
+	prev_esp = (u32 *)irqstk;
+	*prev_esp = current_stack_pointer;
 
 	if (unlikely(overflow))
 		call_on_stack(print_stack_overflow, isp);
@@ -118,46 +123,40 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
  */
 void irq_ctx_init(int cpu)
 {
-	union irq_ctx *irqctx;
+	struct irq_stack *irqstk;
 
-	if (per_cpu(hardirq_ctx, cpu))
+	if (per_cpu(hardirq_stack, cpu))
 		return;
 
-	irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
+	irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
 					       THREADINFO_GFP,
 					       THREAD_SIZE_ORDER));
-	memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
-	irqctx->tinfo.cpu = cpu;
-	irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
-
-	per_cpu(hardirq_ctx, cpu) = irqctx;
+	per_cpu(hardirq_stack, cpu) = irqstk;
 
-	irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
+	irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
 					       THREADINFO_GFP,
 					       THREAD_SIZE_ORDER));
-	memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
-	irqctx->tinfo.cpu = cpu;
-	irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
-
-	per_cpu(softirq_ctx, cpu) = irqctx;
+	per_cpu(softirq_stack, cpu) = irqstk;
 
 	printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
-	       cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
+	       cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu));
 }
 
 void do_softirq_own_stack(void)
 {
-	struct thread_info *curctx;
-	union irq_ctx *irqctx;
-	u32 *isp;
+	struct thread_info *curstk;
+	struct irq_stack *irqstk;
+	u32 *isp, *prev_esp;
 
-	curctx = current_thread_info();
-	irqctx = __this_cpu_read(softirq_ctx);
-	irqctx->tinfo.task = curctx->task;
-	irqctx->tinfo.previous_esp = current_stack_pointer;
+	curstk = current_stack();
+	irqstk = __this_cpu_read(softirq_stack);
 
 	/* build the stack frame on the softirq stack */
-	isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
+	isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));
+
+	/* Push the previous esp onto the stack */
+	prev_esp = (u32 *)irqstk;
+	*prev_esp = current_stack_pointer;
 
 	call_on_stack(__do_softirq, isp);
 }
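Two pieces above are worth spelling out: the current_stack_pointer macro reads
%esp with a single inline-assembly instruction, and current_stack() masks that
value down to the base of the THREAD_SIZE-aligned stack it lives on. The same
idea in a user-space sketch, assuming an x86-64 host (so it reads %rsp where
the kernel's 32-bit code reads %esp); the stack size is illustrative:

	#include <stdio.h>

	#define THREAD_SIZE (2 * 4096UL)  /* illustrative */

	/* User-space analogue of the new current_stack_pointer macro. */
	#define current_stack_pointer ({		\
		unsigned long sp;			\
		asm("mov %%rsp,%0" : "=g" (sp));	\
		sp;					\
	})

	/* Mirror of current_stack(): mask down to the base of the
	 * THREAD_SIZE-aligned region containing the stack pointer. */
	static inline void *current_stack(void)
	{
		return (void *)(current_stack_pointer & ~(THREAD_SIZE - 1));
	}

	int main(void)
	{
		/* Note: a process stack is not really THREAD_SIZE-aligned,
		 * so "base" here only demonstrates the masking. */
		printf("sp   = %#lx\n", current_stack_pointer);
		printf("base = %p\n", current_stack());
		return 0;
	}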
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 0de43e98ce08..7bc86bbe7485 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -314,6 +314,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	 */
 	arch_end_context_switch(next_p);
 
+	this_cpu_write(kernel_stack,
+		  (unsigned long)task_stack_page(next_p) +
+		  THREAD_SIZE - KERNEL_STACK_OFFSET);
+
 	/*
 	 * Restore %gs if needed (which is common)
 	 */
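The added hunk makes the 32-bit __switch_to() refresh the per-CPU kernel_stack
pointer on every context switch, mirroring what the 64-bit path already does,
since thread_info no longer carries the value. A toy model of the update
follows; the constants and the flat function standing in for this_cpu_write()
are illustrative:

	#include <stdio.h>

	#define THREAD_SIZE         (2 * 4096UL)  /* illustrative */
	#define KERNEL_STACK_OFFSET (5 * 8UL)     /* illustrative */

	/* Toy task: in the kernel, task_stack_page() returns the base of
	 * the task's stack allocation; here it is an embedded array. */
	struct task {
		unsigned char stack[THREAD_SIZE];
	};

	/* Stand-in for the running CPU's per-CPU kernel_stack slot. */
	static unsigned long kernel_stack;

	/* Same expression as the this_cpu_write() in the hunk above. */
	static void switch_stack_to(const struct task *next)
	{
		kernel_stack = (unsigned long)next->stack
			       + THREAD_SIZE - KERNEL_STACK_OFFSET;
	}

	int main(void)
	{
		static struct task a, b;

		switch_stack_to(&a);
		printf("running a: kernel_stack=%#lx\n", kernel_stack);
		switch_stack_to(&b);
		printf("running b: kernel_stack=%#lx\n", kernel_stack);
		return 0;
	}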
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 7461f50d5bb1..678c0ada3b3c 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -184,14 +184,14 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
 {
 	unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
 	unsigned long sp = (unsigned long)&regs->sp;
-	struct thread_info *tinfo;
+	u32 *prev_esp;
 
 	if (context == (sp & ~(THREAD_SIZE - 1)))
 		return sp;
 
-	tinfo = (struct thread_info *)context;
-	if (tinfo->previous_esp)
-		return tinfo->previous_esp;
+	prev_esp = (u32 *)(context);
+	if (prev_esp)
+		return (unsigned long)prev_esp;
 
 	return (unsigned long)regs;
 }
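With previous_esp gone from thread_info, kernel_stack_pointer() now masks the
pt_regs address down to its THREAD_SIZE-aligned stack page and consults the
word stored at the bottom of that page. The sketch below models that lookup
with hypothetical names; note that it dereferences the bottom slot to show
where the saved esp lives, whereas the hunk above returns the slot's address:

	#include <stdint.h>
	#include <stdio.h>

	#define THREAD_SIZE (2 * 4096UL)  /* illustrative */

	/* Hypothetical model: from a pt_regs-like address, find the base of
	 * its THREAD_SIZE-aligned stack page and read the esp saved there. */
	static unsigned long saved_esp_of(const void *regs)
	{
		uintptr_t context = (uintptr_t)regs & ~(THREAD_SIZE - 1);

		return (unsigned long)*(const uint32_t *)context;
	}

	int main(void)
	{
		static unsigned char stack[THREAD_SIZE]
			__attribute__((aligned(8192)));  /* so masking works */

		*(uint32_t *)stack = 0xc1234560u;  /* entry path's saved esp */
		const void *regs = stack + 1024;   /* pt_regs further up */

		printf("saved esp: %#lx\n", saved_esp_of(regs));
		return 0;
	}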
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 5aad5a370c85..34826934d4a7 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -766,10 +766,10 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
 #else
 	clear_tsk_thread_flag(idle, TIF_FORK);
 	initial_gs = per_cpu_offset(cpu);
+#endif
 	per_cpu(kernel_stack, cpu) =
 		(unsigned long)task_stack_page(idle) -
 		KERNEL_STACK_OFFSET + THREAD_SIZE;
-#endif
 	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
 	initial_code = (unsigned long)start_secondary;
 	stack_start = idle->thread.sp;
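The #endif move above means the idle task's kernel_stack is now seeded during
CPU bringup on 32-bit as well. Note the two flavors of per-CPU access in this
series: do_boot_cpu() uses per_cpu(kernel_stack, cpu) to write another CPU's
copy before that CPU runs, while __switch_to() uses this_cpu_write() for the
local copy. A toy model of the distinction; the array-based implementation is
purely illustrative, and only the API shape mirrors the kernel's:

	#include <stdio.h>

	#define NR_CPUS 4

	/* Toy per-CPU storage: one slot per CPU. */
	static unsigned long kernel_stack_pcpu[NR_CPUS];
	static int this_cpu;  /* stand-in for "the CPU running this code" */

	#define per_cpu(arr, cpu)	((arr)[(cpu)])
	#define this_cpu_write(arr, v)	((arr)[this_cpu] = (v))

	int main(void)
	{
		/* boot path: CPU 0 seeds CPU 2's copy before CPU 2 runs */
		per_cpu(kernel_stack_pcpu, 2) = 0xdead1000UL;

		/* context-switch path: the local CPU updates its own copy */
		this_cpu = 0;
		this_cpu_write(kernel_stack_pcpu, 0xbeef2000UL);

		printf("cpu0=%#lx cpu2=%#lx\n",
		       per_cpu(kernel_stack_pcpu, 0),
		       per_cpu(kernel_stack_pcpu, 2));
		return 0;
	}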