diff options
Diffstat (limited to 'arch/x86/kernel/irq_32.c')
| -rw-r--r-- | arch/x86/kernel/irq_32.c | 61 |
1 file changed, 21 insertions(+), 40 deletions(-)
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index 74b9ff7341e9..3b09634a5153 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c | |||
| @@ -16,6 +16,7 @@ | |||
| 16 | #include <linux/cpu.h> | 16 | #include <linux/cpu.h> |
| 17 | #include <linux/delay.h> | 17 | #include <linux/delay.h> |
| 18 | #include <linux/uaccess.h> | 18 | #include <linux/uaccess.h> |
| 19 | #include <linux/percpu.h> | ||
| 19 | 20 | ||
| 20 | #include <asm/apic.h> | 21 | #include <asm/apic.h> |
| 21 | 22 | ||
| @@ -55,13 +56,13 @@ static inline void print_stack_overflow(void) { } | |||
| 55 | union irq_ctx { | 56 | union irq_ctx { |
| 56 | struct thread_info tinfo; | 57 | struct thread_info tinfo; |
| 57 | u32 stack[THREAD_SIZE/sizeof(u32)]; | 58 | u32 stack[THREAD_SIZE/sizeof(u32)]; |
| 58 | }; | 59 | } __attribute__((aligned(PAGE_SIZE))); |
| 59 | 60 | ||
| 60 | static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly; | 61 | static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx); |
| 61 | static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly; | 62 | static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx); |
| 62 | 63 | ||
| 63 | static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss; | 64 | static DEFINE_PER_CPU_PAGE_ALIGNED(union irq_ctx, hardirq_stack); |
| 64 | static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss; | 65 | static DEFINE_PER_CPU_PAGE_ALIGNED(union irq_ctx, softirq_stack); |
| 65 | 66 | ||
| 66 | static void call_on_stack(void *func, void *stack) | 67 | static void call_on_stack(void *func, void *stack) |
| 67 | { | 68 | { |
| @@ -81,7 +82,7 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) | |||
| 81 | u32 *isp, arg1, arg2; | 82 | u32 *isp, arg1, arg2; |
| 82 | 83 | ||
| 83 | curctx = (union irq_ctx *) current_thread_info(); | 84 | curctx = (union irq_ctx *) current_thread_info(); |
| 84 | irqctx = hardirq_ctx[smp_processor_id()]; | 85 | irqctx = __get_cpu_var(hardirq_ctx); |
| 85 | 86 | ||
| 86 | /* | 87 | /* |
| 87 | * this is where we switch to the IRQ stack. However, if we are | 88 | * this is where we switch to the IRQ stack. However, if we are |
| @@ -125,34 +126,34 @@ void __cpuinit irq_ctx_init(int cpu) | |||
| 125 | { | 126 | { |
| 126 | union irq_ctx *irqctx; | 127 | union irq_ctx *irqctx; |
| 127 | 128 | ||
| 128 | if (hardirq_ctx[cpu]) | 129 | if (per_cpu(hardirq_ctx, cpu)) |
| 129 | return; | 130 | return; |
| 130 | 131 | ||
| 131 | irqctx = (union irq_ctx*) &hardirq_stack[cpu*THREAD_SIZE]; | 132 | irqctx = &per_cpu(hardirq_stack, cpu); |
| 132 | irqctx->tinfo.task = NULL; | 133 | irqctx->tinfo.task = NULL; |
| 133 | irqctx->tinfo.exec_domain = NULL; | 134 | irqctx->tinfo.exec_domain = NULL; |
| 134 | irqctx->tinfo.cpu = cpu; | 135 | irqctx->tinfo.cpu = cpu; |
| 135 | irqctx->tinfo.preempt_count = HARDIRQ_OFFSET; | 136 | irqctx->tinfo.preempt_count = HARDIRQ_OFFSET; |
| 136 | irqctx->tinfo.addr_limit = MAKE_MM_SEG(0); | 137 | irqctx->tinfo.addr_limit = MAKE_MM_SEG(0); |
| 137 | 138 | ||
| 138 | hardirq_ctx[cpu] = irqctx; | 139 | per_cpu(hardirq_ctx, cpu) = irqctx; |
| 139 | 140 | ||
| 140 | irqctx = (union irq_ctx *) &softirq_stack[cpu*THREAD_SIZE]; | 141 | irqctx = &per_cpu(softirq_stack, cpu); |
| 141 | irqctx->tinfo.task = NULL; | 142 | irqctx->tinfo.task = NULL; |
| 142 | irqctx->tinfo.exec_domain = NULL; | 143 | irqctx->tinfo.exec_domain = NULL; |
| 143 | irqctx->tinfo.cpu = cpu; | 144 | irqctx->tinfo.cpu = cpu; |
| 144 | irqctx->tinfo.preempt_count = 0; | 145 | irqctx->tinfo.preempt_count = 0; |
| 145 | irqctx->tinfo.addr_limit = MAKE_MM_SEG(0); | 146 | irqctx->tinfo.addr_limit = MAKE_MM_SEG(0); |
| 146 | 147 | ||
| 147 | softirq_ctx[cpu] = irqctx; | 148 | per_cpu(softirq_ctx, cpu) = irqctx; |
| 148 | 149 | ||
| 149 | printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n", | 150 | printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n", |
| 150 | cpu, hardirq_ctx[cpu], softirq_ctx[cpu]); | 151 | cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu)); |
| 151 | } | 152 | } |
| 152 | 153 | ||
| 153 | void irq_ctx_exit(int cpu) | 154 | void irq_ctx_exit(int cpu) |
| 154 | { | 155 | { |
| 155 | hardirq_ctx[cpu] = NULL; | 156 | per_cpu(hardirq_ctx, cpu) = NULL; |
| 156 | } | 157 | } |
| 157 | 158 | ||
| 158 | asmlinkage void do_softirq(void) | 159 | asmlinkage void do_softirq(void) |
| @@ -169,7 +170,7 @@ asmlinkage void do_softirq(void) | |||
| 169 | 170 | ||
| 170 | if (local_softirq_pending()) { | 171 | if (local_softirq_pending()) { |
| 171 | curctx = current_thread_info(); | 172 | curctx = current_thread_info(); |
| 172 | irqctx = softirq_ctx[smp_processor_id()]; | 173 | irqctx = __get_cpu_var(softirq_ctx); |
| 173 | irqctx->tinfo.task = curctx->task; | 174 | irqctx->tinfo.task = curctx->task; |
| 174 | irqctx->tinfo.previous_esp = current_stack_pointer; | 175 | irqctx->tinfo.previous_esp = current_stack_pointer; |
| 175 | 176 | ||
| @@ -191,33 +192,16 @@ static inline int | |||
| 191 | execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) { return 0; } | 192 | execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) { return 0; } |
| 192 | #endif | 193 | #endif |
| 193 | 194 | ||
| 194 | /* | 195 | bool handle_irq(unsigned irq, struct pt_regs *regs) |
| 195 | * do_IRQ handles all normal device IRQ's (the special | ||
| 196 | * SMP cross-CPU interrupts have their own specific | ||
| 197 | * handlers). | ||
| 198 | */ | ||
| 199 | unsigned int do_IRQ(struct pt_regs *regs) | ||
| 200 | { | 196 | { |
| 201 | struct pt_regs *old_regs; | ||
| 202 | /* high bit used in ret_from_ code */ | ||
| 203 | int overflow; | ||
| 204 | unsigned vector = ~regs->orig_ax; | ||
| 205 | struct irq_desc *desc; | 197 | struct irq_desc *desc; |
| 206 | unsigned irq; | 198 | int overflow; |
| 207 | |||
| 208 | |||
| 209 | old_regs = set_irq_regs(regs); | ||
| 210 | irq_enter(); | ||
| 211 | irq = __get_cpu_var(vector_irq)[vector]; | ||
| 212 | 199 | ||
| 213 | overflow = check_stack_overflow(); | 200 | overflow = check_stack_overflow(); |
| 214 | 201 | ||
| 215 | desc = irq_to_desc(irq); | 202 | desc = irq_to_desc(irq); |
| 216 | if (unlikely(!desc)) { | 203 | if (unlikely(!desc)) |
| 217 | printk(KERN_EMERG "%s: cannot handle IRQ %d vector %#x cpu %d\n", | 204 | return false; |
| 218 | __func__, irq, vector, smp_processor_id()); | ||
| 219 | BUG(); | ||
| 220 | } | ||
| 221 | 205 | ||
| 222 | if (!execute_on_irq_stack(overflow, desc, irq)) { | 206 | if (!execute_on_irq_stack(overflow, desc, irq)) { |
| 223 | if (unlikely(overflow)) | 207 | if (unlikely(overflow)) |
| @@ -225,13 +209,10 @@ unsigned int do_IRQ(struct pt_regs *regs) | |||
| 225 | desc->handle_irq(irq, desc); | 209 | desc->handle_irq(irq, desc); |
| 226 | } | 210 | } |
| 227 | 211 | ||
| 228 | irq_exit(); | 212 | return true; |
| 229 | set_irq_regs(old_regs); | ||
| 230 | return 1; | ||
| 231 | } | 213 | } |
| 232 | 214 | ||
| 233 | #ifdef CONFIG_HOTPLUG_CPU | 215 | #ifdef CONFIG_HOTPLUG_CPU |
| 234 | #include <mach_apic.h> | ||
| 235 | 216 | ||
| 236 | /* A cpu has been removed from cpu_online_mask. Reset irq affinities. */ | 217 | /* A cpu has been removed from cpu_online_mask. Reset irq affinities. */ |
| 237 | void fixup_irqs(void) | 218 | void fixup_irqs(void) |
| @@ -248,7 +229,7 @@ void fixup_irqs(void) | |||
| 248 | if (irq == 2) | 229 | if (irq == 2) |
| 249 | continue; | 230 | continue; |
| 250 | 231 | ||
| 251 | affinity = &desc->affinity; | 232 | affinity = desc->affinity; |
| 252 | if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { | 233 | if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { |
| 253 | printk("Breaking affinity for irq %i\n", irq); | 234 | printk("Breaking affinity for irq %i\n", irq); |
| 254 | affinity = cpu_all_mask; | 235 | affinity = cpu_all_mask; |
