diff options
author | Glenn Elliott <gelliott@cs.unc.edu> | 2012-03-04 19:47:13 -0500 |
---|---|---|
committer | Glenn Elliott <gelliott@cs.unc.edu> | 2012-03-04 19:47:13 -0500 |
commit | c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch) | |
tree | ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /arch/x86/kernel/irq_32.c | |
parent | ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff) | |
parent | 6a00f206debf8a5c8899055726ad127dbeeed098 (diff) |
Merge branch 'mpi-master' into wip-k-fmlp
Conflicts:
litmus/sched_cedf.c
Diffstat (limited to 'arch/x86/kernel/irq_32.c')
-rw-r--r-- | arch/x86/kernel/irq_32.c | 38 |
1 files changed, 13 insertions, 25 deletions
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index 10709f29d166..72090705a656 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/delay.h> | 17 | #include <linux/delay.h> |
18 | #include <linux/uaccess.h> | 18 | #include <linux/uaccess.h> |
19 | #include <linux/percpu.h> | 19 | #include <linux/percpu.h> |
20 | #include <linux/mm.h> | ||
20 | 21 | ||
21 | #include <asm/apic.h> | 22 | #include <asm/apic.h> |
22 | 23 | ||
@@ -49,21 +50,17 @@ static inline int check_stack_overflow(void) { return 0; } | |||
49 | static inline void print_stack_overflow(void) { } | 50 | static inline void print_stack_overflow(void) { } |
50 | #endif | 51 | #endif |
51 | 52 | ||
52 | #ifdef CONFIG_4KSTACKS | ||
53 | /* | 53 | /* |
54 | * per-CPU IRQ handling contexts (thread information and stack) | 54 | * per-CPU IRQ handling contexts (thread information and stack) |
55 | */ | 55 | */ |
56 | union irq_ctx { | 56 | union irq_ctx { |
57 | struct thread_info tinfo; | 57 | struct thread_info tinfo; |
58 | u32 stack[THREAD_SIZE/sizeof(u32)]; | 58 | u32 stack[THREAD_SIZE/sizeof(u32)]; |
59 | } __attribute__((aligned(PAGE_SIZE))); | 59 | } __attribute__((aligned(THREAD_SIZE))); |
60 | 60 | ||
61 | static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx); | 61 | static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx); |
62 | static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx); | 62 | static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx); |
63 | 63 | ||
64 | static DEFINE_PER_CPU_PAGE_ALIGNED(union irq_ctx, hardirq_stack); | ||
65 | static DEFINE_PER_CPU_PAGE_ALIGNED(union irq_ctx, softirq_stack); | ||
66 | |||
67 | static void call_on_stack(void *func, void *stack) | 64 | static void call_on_stack(void *func, void *stack) |
68 | { | 65 | { |
69 | asm volatile("xchgl %%ebx,%%esp \n" | 66 | asm volatile("xchgl %%ebx,%%esp \n" |
@@ -82,7 +79,7 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) | |||
82 | u32 *isp, arg1, arg2; | 79 | u32 *isp, arg1, arg2; |
83 | 80 | ||
84 | curctx = (union irq_ctx *) current_thread_info(); | 81 | curctx = (union irq_ctx *) current_thread_info(); |
85 | irqctx = __get_cpu_var(hardirq_ctx); | 82 | irqctx = __this_cpu_read(hardirq_ctx); |
86 | 83 | ||
87 | /* | 84 | /* |
88 | * this is where we switch to the IRQ stack. However, if we are | 85 | * this is where we switch to the IRQ stack. However, if we are |
@@ -129,20 +126,21 @@ void __cpuinit irq_ctx_init(int cpu) | |||
129 | if (per_cpu(hardirq_ctx, cpu)) | 126 | if (per_cpu(hardirq_ctx, cpu)) |
130 | return; | 127 | return; |
131 | 128 | ||
132 | irqctx = &per_cpu(hardirq_stack, cpu); | 129 | irqctx = page_address(alloc_pages_node(cpu_to_node(cpu), |
133 | irqctx->tinfo.task = NULL; | 130 | THREAD_FLAGS, |
134 | irqctx->tinfo.exec_domain = NULL; | 131 | THREAD_ORDER)); |
132 | memset(&irqctx->tinfo, 0, sizeof(struct thread_info)); | ||
135 | irqctx->tinfo.cpu = cpu; | 133 | irqctx->tinfo.cpu = cpu; |
136 | irqctx->tinfo.preempt_count = HARDIRQ_OFFSET; | 134 | irqctx->tinfo.preempt_count = HARDIRQ_OFFSET; |
137 | irqctx->tinfo.addr_limit = MAKE_MM_SEG(0); | 135 | irqctx->tinfo.addr_limit = MAKE_MM_SEG(0); |
138 | 136 | ||
139 | per_cpu(hardirq_ctx, cpu) = irqctx; | 137 | per_cpu(hardirq_ctx, cpu) = irqctx; |
140 | 138 | ||
141 | irqctx = &per_cpu(softirq_stack, cpu); | 139 | irqctx = page_address(alloc_pages_node(cpu_to_node(cpu), |
142 | irqctx->tinfo.task = NULL; | 140 | THREAD_FLAGS, |
143 | irqctx->tinfo.exec_domain = NULL; | 141 | THREAD_ORDER)); |
142 | memset(&irqctx->tinfo, 0, sizeof(struct thread_info)); | ||
144 | irqctx->tinfo.cpu = cpu; | 143 | irqctx->tinfo.cpu = cpu; |
145 | irqctx->tinfo.preempt_count = 0; | ||
146 | irqctx->tinfo.addr_limit = MAKE_MM_SEG(0); | 144 | irqctx->tinfo.addr_limit = MAKE_MM_SEG(0); |
147 | 145 | ||
148 | per_cpu(softirq_ctx, cpu) = irqctx; | 146 | per_cpu(softirq_ctx, cpu) = irqctx; |
@@ -151,11 +149,6 @@ void __cpuinit irq_ctx_init(int cpu) | |||
151 | cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu)); | 149 | cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu)); |
152 | } | 150 | } |
153 | 151 | ||
154 | void irq_ctx_exit(int cpu) | ||
155 | { | ||
156 | per_cpu(hardirq_ctx, cpu) = NULL; | ||
157 | } | ||
158 | |||
159 | asmlinkage void do_softirq(void) | 152 | asmlinkage void do_softirq(void) |
160 | { | 153 | { |
161 | unsigned long flags; | 154 | unsigned long flags; |
@@ -170,7 +163,7 @@ asmlinkage void do_softirq(void) | |||
170 | 163 | ||
171 | if (local_softirq_pending()) { | 164 | if (local_softirq_pending()) { |
172 | curctx = current_thread_info(); | 165 | curctx = current_thread_info(); |
173 | irqctx = __get_cpu_var(softirq_ctx); | 166 | irqctx = __this_cpu_read(softirq_ctx); |
174 | irqctx->tinfo.task = curctx->task; | 167 | irqctx->tinfo.task = curctx->task; |
175 | irqctx->tinfo.previous_esp = current_stack_pointer; | 168 | irqctx->tinfo.previous_esp = current_stack_pointer; |
176 | 169 | ||
@@ -179,7 +172,7 @@ asmlinkage void do_softirq(void) | |||
179 | 172 | ||
180 | call_on_stack(__do_softirq, isp); | 173 | call_on_stack(__do_softirq, isp); |
181 | /* | 174 | /* |
182 | * Shouldnt happen, we returned above if in_interrupt(): | 175 | * Shouldn't happen, we returned above if in_interrupt(): |
183 | */ | 176 | */ |
184 | WARN_ON_ONCE(softirq_count()); | 177 | WARN_ON_ONCE(softirq_count()); |
185 | } | 178 | } |
@@ -187,11 +180,6 @@ asmlinkage void do_softirq(void) | |||
187 | local_irq_restore(flags); | 180 | local_irq_restore(flags); |
188 | } | 181 | } |
189 | 182 | ||
190 | #else | ||
191 | static inline int | ||
192 | execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) { return 0; } | ||
193 | #endif | ||
194 | |||
195 | bool handle_irq(unsigned irq, struct pt_regs *regs) | 183 | bool handle_irq(unsigned irq, struct pt_regs *regs) |
196 | { | 184 | { |
197 | struct irq_desc *desc; | 185 | struct irq_desc *desc; |