Diffstat (limited to 'arch/x86/kernel/irq_32.c')
-rw-r--r--	arch/x86/kernel/irq_32.c | 83
1 file changed, 41 insertions(+), 42 deletions(-)
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index d7fcbedc9c43..63ce838e5a54 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -55,16 +55,8 @@ static inline int check_stack_overflow(void) { return 0; }
 static inline void print_stack_overflow(void) { }
 #endif
 
-/*
- * per-CPU IRQ handling contexts (thread information and stack)
- */
-union irq_ctx {
-	struct thread_info tinfo;
-	u32 stack[THREAD_SIZE/sizeof(u32)];
-} __attribute__((aligned(THREAD_SIZE)));
-
-static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
-static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
+DEFINE_PER_CPU(struct irq_stack *, hardirq_stack);
+DEFINE_PER_CPU(struct irq_stack *, softirq_stack);
 
 static void call_on_stack(void *func, void *stack)
 {
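Note: struct irq_stack is not defined in this file; it comes from elsewhere in this series (presumably arch/x86/include/asm/processor.h). A rough sketch of the layout these declarations assume, essentially the old union irq_ctx with the embedded thread_info dropped:

struct irq_stack {
	u32	stack[THREAD_SIZE / sizeof(u32)];
} __aligned(THREAD_SIZE);

With no thread_info living at the bottom of the stack area any more, the first word there is free; the hunks below use it to stash the previous stack pointer.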
@@ -77,14 +69,26 @@ static void call_on_stack(void *func, void *stack)
 		     : "memory", "cc", "edx", "ecx", "eax");
 }
 
+/* how to get the current stack pointer from C */
+#define current_stack_pointer ({		\
+	unsigned long sp;			\
+	asm("mov %%esp,%0" : "=g" (sp));	\
+	sp;					\
+})
+
+static inline void *current_stack(void)
+{
+	return (void *)(current_stack_pointer & ~(THREAD_SIZE - 1));
+}
+
 static inline int
 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
 {
-	union irq_ctx *curctx, *irqctx;
-	u32 *isp, arg1, arg2;
+	struct irq_stack *curstk, *irqstk;
+	u32 *isp, *prev_esp, arg1, arg2;
 
-	curctx = (union irq_ctx *) current_thread_info();
-	irqctx = __this_cpu_read(hardirq_ctx);
+	curstk = (struct irq_stack *) current_stack();
+	irqstk = __this_cpu_read(hardirq_stack);
 
 	/*
 	 * this is where we switch to the IRQ stack. However, if we are
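The current_stack_pointer macro and current_stack() helper added above rely on kernel stacks being THREAD_SIZE bytes and THREAD_SIZE-aligned, so masking any stack address with ~(THREAD_SIZE - 1) yields the base of the stack. A minimal standalone sketch of that idea (userspace C, 32-bit x86, THREAD_SIZE assumed to be 8 KiB, build with gcc -m32; a process stack is not really THREAD_SIZE-aligned, so the masked value is only illustrative):

#include <stdio.h>

#define THREAD_SIZE 8192UL

/* read %esp from C, as the macro above does */
#define current_stack_pointer ({		\
	unsigned long sp;			\
	asm("mov %%esp,%0" : "=g" (sp));	\
	sp;					\
})

int main(void)
{
	unsigned long sp = current_stack_pointer;

	/* rounding down to a THREAD_SIZE boundary gives the stack base */
	printf("esp = %#lx, THREAD_SIZE-aligned base = %#lx\n",
	       sp, sp & ~(THREAD_SIZE - 1));
	return 0;
}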
@@ -92,13 +96,14 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
 	 * handler) we can't do that and just have to keep using the
 	 * current stack (which is the irq stack already after all)
 	 */
-	if (unlikely(curctx == irqctx))
+	if (unlikely(curstk == irqstk))
 		return 0;
 
-	/* build the stack frame on the IRQ stack */
-	isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
-	irqctx->tinfo.task = curctx->tinfo.task;
-	irqctx->tinfo.previous_esp = current_stack_pointer;
+	isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));
+
+	/* Save the next esp at the bottom of the stack */
+	prev_esp = (u32 *)irqstk;
+	*prev_esp = current_stack_pointer;
 
 	if (unlikely(overflow))
 		call_on_stack(print_stack_overflow, isp);
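The store through prev_esp replaces the old irqctx->tinfo.previous_esp: with no thread_info on the irq stack, the interrupted stack pointer now lives in the first word at the very bottom of the irq stack, where anyone who can compute the stack base can find it. A self-contained sketch of that convention (userspace C; THREAD_SIZE assumed to be 8 KiB, irqstk and the values purely illustrative):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define THREAD_SIZE 8192UL

/* stand-in for a per-CPU irq stack: THREAD_SIZE bytes, THREAD_SIZE-aligned */
static uint32_t irqstk[THREAD_SIZE / sizeof(uint32_t)]
	__attribute__((aligned(THREAD_SIZE)));

int main(void)
{
	/* top of the irq stack, where the handler's frame is built (the "isp" above) */
	uint32_t *isp = (uint32_t *)((char *)irqstk + sizeof(irqstk));

	/* pretend this was the interrupted task's stack pointer */
	uint32_t task_esp = 0xc0123456;

	/* save it in the first word at the bottom of the irq stack ... */
	uint32_t *prev_esp = (uint32_t *)irqstk;
	*prev_esp = task_esp;

	/* ... so any address inside the irq stack can be masked down to the
	 * THREAD_SIZE-aligned base to recover the saved value */
	uintptr_t some_sp = (uintptr_t)isp - 200;
	uint32_t *base = (uint32_t *)(some_sp & ~(THREAD_SIZE - 1));
	assert(base == irqstk);

	printf("saved previous esp = %#x\n", *base);
	return 0;
}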
@@ -118,46 +123,40 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
  */
 void irq_ctx_init(int cpu)
 {
-	union irq_ctx *irqctx;
+	struct irq_stack *irqstk;
 
-	if (per_cpu(hardirq_ctx, cpu))
+	if (per_cpu(hardirq_stack, cpu))
 		return;
 
-	irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
+	irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
 					       THREADINFO_GFP,
 					       THREAD_SIZE_ORDER));
-	memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
-	irqctx->tinfo.cpu = cpu;
-	irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
-
-	per_cpu(hardirq_ctx, cpu) = irqctx;
+	per_cpu(hardirq_stack, cpu) = irqstk;
 
-	irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
+	irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
 					       THREADINFO_GFP,
 					       THREAD_SIZE_ORDER));
-	memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
-	irqctx->tinfo.cpu = cpu;
-	irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
-
-	per_cpu(softirq_ctx, cpu) = irqctx;
+	per_cpu(softirq_stack, cpu) = irqstk;
 
 	printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
-	       cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
+	       cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu));
 }
 
 void do_softirq_own_stack(void)
 {
-	struct thread_info *curctx;
-	union irq_ctx *irqctx;
-	u32 *isp;
+	struct thread_info *curstk;
+	struct irq_stack *irqstk;
+	u32 *isp, *prev_esp;
 
-	curctx = current_thread_info();
-	irqctx = __this_cpu_read(softirq_ctx);
-	irqctx->tinfo.task = curctx->task;
-	irqctx->tinfo.previous_esp = current_stack_pointer;
+	curstk = current_stack();
+	irqstk = __this_cpu_read(softirq_stack);
 
 	/* build the stack frame on the softirq stack */
-	isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
+	isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));
+
+	/* Push the previous esp onto the stack */
+	prev_esp = (u32 *)irqstk;
+	*prev_esp = current_stack_pointer;
 
 	call_on_stack(__do_softirq, isp);
 }
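For context, call_on_stack() itself (only its clobber list appears in the diff above) is what actually flips %esp over to the freshly prepared isp and back again. Reconstructed roughly from the surrounding file, not part of this patch and possibly differing in detail, it looks like:

static void call_on_stack(void *func, void *stack)
{
	asm volatile("xchgl	%%ebx,%%esp	\n"	/* switch to the new stack */
		     "call	*%%edi		\n"	/* run func there */
		     "movl	%%ebx,%%esp	\n"	/* switch back */
		     : "=b" (stack)
		     : "0" (stack), "D" (func)
		     : "memory", "cc", "edx", "ecx", "eax");
}

So the "stack frame" mentioned in the comment is created by the call instruction executed with %esp at isp; the only thing do_softirq_own_stack() writes onto the softirq stack by hand is the saved previous esp at its bottom.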