Diffstat (limited to 'arch/x86/kernel/dumpstack_64.c')
-rw-r--r--  arch/x86/kernel/dumpstack_64.c  117
1 file changed, 83 insertions(+), 34 deletions(-)
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index addb207dab92..346b1df2412e 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -104,6 +104,45 @@ in_irq_stack(unsigned long *stack, unsigned long *irq_stack,
 	return (stack >= irq_stack && stack < irq_stack_end);
 }
 
+static const unsigned long irq_stack_size =
+	(IRQ_STACK_SIZE - 64) / sizeof(unsigned long);
+
+enum stack_type {
+	STACK_IS_UNKNOWN,
+	STACK_IS_NORMAL,
+	STACK_IS_EXCEPTION,
+	STACK_IS_IRQ,
+};
+
+static enum stack_type
+analyze_stack(int cpu, struct task_struct *task,
+	      unsigned long *stack, unsigned long **stack_end, char **id)
+{
+	unsigned long *irq_stack;
+	unsigned long addr;
+	unsigned used = 0;
+
+	addr = ((unsigned long)stack & (~(THREAD_SIZE - 1)));
+	if ((unsigned long)task_stack_page(task) == addr)
+		return STACK_IS_NORMAL;
+
+	*stack_end = in_exception_stack(cpu, (unsigned long)stack,
+					&used, id);
+	if (*stack_end)
+		return STACK_IS_EXCEPTION;
+
+	*stack_end = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
+	if (!*stack_end)
+		return STACK_IS_UNKNOWN;
+
+	irq_stack = *stack_end - irq_stack_size;
+
+	if (in_irq_stack(stack, irq_stack, *stack_end))
+		return STACK_IS_IRQ;
+
+	return STACK_IS_UNKNOWN;
+}
+
 /*
  * x86-64 can have up to three kernel stacks:
  * process stack
@@ -116,12 +155,11 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 		const struct stacktrace_ops *ops, void *data)
 {
 	const unsigned cpu = get_cpu();
-	unsigned long *irq_stack_end =
-		(unsigned long *)per_cpu(irq_stack_ptr, cpu);
-	unsigned used = 0;
 	struct thread_info *tinfo;
-	int graph = 0;
+	unsigned long *irq_stack;
 	unsigned long dummy;
+	int graph = 0;
+	int done = 0;
 
 	if (!task)
 		task = current;
@@ -143,49 +181,60 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 	 * exceptions
 	 */
 	tinfo = task_thread_info(task);
-	for (;;) {
+	while (!done) {
+		unsigned long *stack_end;
+		enum stack_type stype;
 		char *id;
-		unsigned long *estack_end;
-		estack_end = in_exception_stack(cpu, (unsigned long)stack,
-						&used, &id);
 
-		if (estack_end) {
+		stype = analyze_stack(cpu, task, stack, &stack_end, &id);
+
+		/* Default finish unless specified to continue */
+		done = 1;
+
+		switch (stype) {
+
+		/* Break out early if we are on the thread stack */
+		case STACK_IS_NORMAL:
+			break;
+
+		case STACK_IS_EXCEPTION:
+
 			if (ops->stack(data, id) < 0)
 				break;
 
 			bp = ops->walk_stack(tinfo, stack, bp, ops,
-					     data, estack_end, &graph);
+					     data, stack_end, &graph);
 			ops->stack(data, "<EOE>");
 			/*
 			 * We link to the next stack via the
 			 * second-to-last pointer (index -2 to end) in the
 			 * exception stack:
 			 */
-			stack = (unsigned long *) estack_end[-2];
-			continue;
-		}
-		if (irq_stack_end) {
-			unsigned long *irq_stack;
-			irq_stack = irq_stack_end -
-				(IRQ_STACK_SIZE - 64) / sizeof(*irq_stack);
-
-			if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
-				if (ops->stack(data, "IRQ") < 0)
-					break;
-				bp = ops->walk_stack(tinfo, stack, bp,
-					ops, data, irq_stack_end, &graph);
-				/*
-				 * We link to the next stack (which would be
-				 * the process stack normally) the last
-				 * pointer (index -1 to end) in the IRQ stack:
-				 */
-				stack = (unsigned long *) (irq_stack_end[-1]);
-				irq_stack_end = NULL;
-				ops->stack(data, "EOI");
-				continue;
-			}
+			stack = (unsigned long *) stack_end[-2];
+			done = 0;
+			break;
+
+		case STACK_IS_IRQ:
+
+			if (ops->stack(data, "IRQ") < 0)
+				break;
+			bp = ops->walk_stack(tinfo, stack, bp,
+				ops, data, stack_end, &graph);
+			/*
+			 * We link to the next stack (which would be
+			 * the process stack normally) the last
+			 * pointer (index -1 to end) in the IRQ stack:
+			 */
+			stack = (unsigned long *) (stack_end[-1]);
+			irq_stack = stack_end - irq_stack_size;
+			ops->stack(data, "EOI");
+			done = 0;
+			break;
+
+		case STACK_IS_UNKNOWN:
+			ops->stack(data, "UNK");
+			break;
 		}
-		break;
 	}
 
 	/*