/*
 * Derived from arch/i386/kernel/irq.c
 *   Copyright (C) 1992 Linus Torvalds
 * Adapted from arch/i386 by Gary Thomas
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Updated and modified by Cort Dougan
 *   Copyright (C) 1996-2001 Cort Dougan
 * Adapted for Power Macintosh by Paul Mackerras
 *   Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them.  Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * The MPC8xx has an interrupt mask in the SIU.  If a bit is set, the
 * interrupt is _enabled_.  As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask.  I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditionally compiled on CONFIG_8xx
 * to reduce code space and undefined function references.
 */

#undef DEBUG

#include <linux/export.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/of_irq.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#include <asm/smp.h>

#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>
#endif
#define CREATE_TRACE_POINTS
#include <asm/trace.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

int __irq_offset_value;

#ifdef CONFIG_PPC32
EXPORT_SYMBOL(__irq_offset_value);
atomic_t ppc_n_lost_interrupts;

#ifdef CONFIG_TAU_INT
extern int tau_initialized;
extern int tau_interrupts(int);
#endif
#endif /* CONFIG_PPC32 */

#ifdef CONFIG_PPC64

#ifndef CONFIG_SPARSE_IRQ
EXPORT_SYMBOL(irq_desc);
#endif

int distribute_irqs = 1;

static inline notrace unsigned long get_hard_enabled(void)
{
	unsigned long enabled;

	__asm__ __volatile__("lbz %0,%1(13)"
	: "=r" (enabled) : "i" (offsetof(struct paca_struct, hard_enabled)));

	return enabled;
}

static inline notrace void set_soft_enabled(unsigned long enable)
{
	__asm__ __volatile__("stb %0,%1(13)"
	: : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
}

static inline notrace void decrementer_check_overflow(void)
{
	u64 now = get_tb_or_rtc();
	u64 *next_tb = &__get_cpu_var(decrementers_next_tb);

	if (now >= *next_tb)
		set_dec(1);
}
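/*
 * Background note on the function below: PPC64 uses "lazy" interrupt
 * disabling.  local_irq_disable() only clears the soft_enabled flag in
 * the PACA and leaves MSR[EE] alone.  If an interrupt arrives while we
 * are soft-disabled, the exception entry code hard-disables and leaves
 * hard_enabled clear, so arch_local_irq_restore() is where the deferred
 * work gets replayed: it re-checks the decrementer (and iSeries/PS3
 * pending-interrupt state) before finally hard-enabling with
 * __hard_irq_enable().
 */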
notrace void arch_local_irq_restore(unsigned long en)
{
	/*
	 * get_paca()->soft_enabled = en;
	 * Is it ever valid to use local_irq_restore(0) when soft_enabled is 1?
	 * That was allowed before, and in such a case we do need to take care
	 * that gcc will set soft_enabled directly via r13, not choose to use
	 * an intermediate register, lest we're preempted to a different cpu.
	 */
	set_soft_enabled(en);
	if (!en)
		return;

#ifdef CONFIG_PPC_STD_MMU_64
	if (firmware_has_feature(FW_FEATURE_ISERIES)) {
		/*
		 * Do we need to disable preemption here?  Not really: in the
		 * unlikely event that we're preempted to a different cpu in
		 * between getting r13, loading its lppaca_ptr, and loading
		 * its any_int, we might call iseries_handle_interrupts without
		 * an interrupt pending on the new cpu, but that's no disaster,
		 * is it?  And the business of preempting us off the old cpu
		 * would itself involve a local_irq_restore which handles the
		 * interrupt to that cpu.
		 *
		 * But use "local_paca->lppaca_ptr" instead of "get_lppaca()"
		 * to avoid any preemption checking added into get_paca().
		 */
		if (local_paca->lppaca_ptr->int_dword.any_int)
			iseries_handle_interrupts();
	}
#endif /* CONFIG_PPC_STD_MMU_64 */

	/*
	 * if (get_paca()->hard_enabled) return;
	 * But again we need to take care that gcc gets hard_enabled directly
	 * via r13, not choose to use an intermediate register, lest we're
	 * preempted to a different cpu in between the two instructions.
	 */
	if (get_hard_enabled())
		return;

	/*
	 * Need to hard-enable interrupts here.  Since currently disabled,
	 * no need to take further asm precautions against preemption; but
	 * use local_paca instead of get_paca() to avoid preemption checking.
	 */
	local_paca->hard_enabled = en;

	/*
	 * Trigger the decrementer if we have a pending event.  Some processors
	 * only trigger on edge transitions of the sign bit.  We might also
	 * have disabled interrupts long enough that the decrementer wrapped
	 * to positive.
	 */
	decrementer_check_overflow();

	/*
	 * Force the delivery of pending soft-disabled interrupts on PS3.
	 * Any HV call will have this side effect.
	 */
	if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
		u64 tmp, tmp2;
		lv1_get_version_info(&tmp, &tmp2);
	}

	__hard_irq_enable();
}
EXPORT_SYMBOL(arch_local_irq_restore);
#endif /* CONFIG_PPC64 */

int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
	if (tau_initialized) {
		seq_printf(p, "%*s: ", prec, "TAU");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", tau_interrupts(j));
		seq_puts(p, "  PowerPC Thermal Assist (cpu temp)\n");
	}
#endif /* CONFIG_PPC32 && CONFIG_TAU_INT */

	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs);
	seq_printf(p, "  Local timer interrupts\n");

	seq_printf(p, "%*s: ", prec, "SPU");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
	seq_printf(p, "  Spurious interrupts\n");

	seq_printf(p, "%*s: ", prec, "CNT");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
	seq_printf(p, "  Performance monitoring interrupts\n");

	seq_printf(p, "%*s: ", prec, "MCE");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
	seq_printf(p, "  Machine check exceptions\n");

	return 0;
}

/*
 * /proc/stat helpers
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = per_cpu(irq_stat, cpu).timer_irqs;

	sum += per_cpu(irq_stat, cpu).pmu_irqs;
	sum += per_cpu(irq_stat, cpu).mce_exceptions;
	sum += per_cpu(irq_stat, cpu).spurious_irqs;

	return sum;
}
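/*
 * Used on the CPU-offline path: migrate_irqs() walks every interrupt,
 * skips per-CPU ones, and masks each affinity against the online map.
 * If an IRQ would be left with no online CPU, its affinity is reset to
 * the full online mask ("Breaking affinity"), and the result is pushed
 * down to the hardware via the chip's irq_set_affinity() hook.  The
 * brief local_irq_enable()/mdelay(1) at the end lets any interrupts
 * already latched for this CPU be delivered before it goes away.
 */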
#ifdef CONFIG_HOTPLUG_CPU
void migrate_irqs(void)
{
	struct irq_desc *desc;
	unsigned int irq;
	static int warned;
	cpumask_var_t mask;
	const struct cpumask *map = cpu_online_mask;

	alloc_cpumask_var(&mask, GFP_KERNEL);

	for_each_irq(irq) {
		struct irq_data *data;
		struct irq_chip *chip;

		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		data = irq_desc_get_irq_data(desc);
		if (irqd_is_per_cpu(data))
			continue;

		chip = irq_data_get_irq_chip(data);

		cpumask_and(mask, data->affinity, map);
		if (cpumask_any(mask) >= nr_cpu_ids) {
			printk("Breaking affinity for irq %i\n", irq);
			cpumask_copy(mask, map);
		}
		if (chip->irq_set_affinity)
			chip->irq_set_affinity(data, mask, true);
		else if (desc->action && !(warned++))
			printk("Cannot set affinity for irq %i\n", irq);
	}

	free_cpumask_var(mask);

	local_irq_enable();
	mdelay(1);
	local_irq_disable();
}
#endif

static inline void handle_one_irq(unsigned int irq)
{
	struct thread_info *curtp, *irqtp;
	unsigned long saved_sp_limit;
	struct irq_desc *desc;

	desc = irq_to_desc(irq);
	if (!desc)
		return;

	/* Switch to the irq stack to handle this */
	curtp = current_thread_info();
	irqtp = hardirq_ctx[smp_processor_id()];

	if (curtp == irqtp) {
		/* We're already on the irq stack, just handle it */
		desc->handle_irq(irq, desc);
		return;
	}

	saved_sp_limit = current->thread.ksp_limit;

	irqtp->task = curtp->task;
	irqtp->flags = 0;

	/* Copy the softirq bits in preempt_count so that the
	 * softirq checks work in the hardirq context. */
	irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) |
			       (curtp->preempt_count & SOFTIRQ_MASK);

	current->thread.ksp_limit = (unsigned long)irqtp +
		_ALIGN_UP(sizeof(struct thread_info), 16);

	call_handle_irq(irq, desc, irqtp, desc->handle_irq);
	current->thread.ksp_limit = saved_sp_limit;
	irqtp->task = NULL;

	/* Set any flag that may have been set on the
	 * alternate stack
	 */
	if (irqtp->flags)
		set_bits(irqtp->flags, &curtp->flags);
}

static inline void check_stack_overflow(void)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
	long sp;

	sp = __get_SP() & (THREAD_SIZE-1);

	/* check for stack overflow: is there less than 2KB free? */
	if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
		printk("do_IRQ: stack overflow: %ld\n",
			sp - sizeof(struct thread_info));
		dump_stack();
	}
#endif
}

void do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned int irq;

	trace_irq_entry(regs);

	irq_enter();

	check_stack_overflow();

	irq = ppc_md.get_irq();

	if (irq != NO_IRQ && irq != NO_IRQ_IGNORE)
		handle_one_irq(irq);
	else if (irq != NO_IRQ_IGNORE)
		__get_cpu_var(irq_stat).spurious_irqs++;

	irq_exit();
	set_irq_regs(old_regs);

#ifdef CONFIG_PPC_ISERIES
	if (firmware_has_feature(FW_FEATURE_ISERIES) &&
			get_lppaca()->int_dword.fields.decr_int) {
		get_lppaca()->int_dword.fields.decr_int = 0;
		/* Signal a fake decrementer interrupt */
		timer_interrupt(regs);
	}
#endif

	trace_irq_exit(regs);
}

void __init init_IRQ(void)
{
	if (ppc_md.init_IRQ)
		ppc_md.init_IRQ();

	exc_lvl_ctx_init();

	irq_ctx_init();
}

#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
struct thread_info *critirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *dbgirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly;

void exc_lvl_ctx_init(void)
{
	struct thread_info *tp;
	int i, cpu_nr;

	for_each_possible_cpu(i) {
#ifdef CONFIG_PPC64
		cpu_nr = i;
#else
		cpu_nr = get_hard_smp_processor_id(i);
#endif
		memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE);
		tp = critirq_ctx[cpu_nr];
		tp->cpu = cpu_nr;
		tp->preempt_count = 0;

#ifdef CONFIG_BOOKE
		memset((void *)dbgirq_ctx[cpu_nr], 0, THREAD_SIZE);
		tp = dbgirq_ctx[cpu_nr];
		tp->cpu = cpu_nr;
		tp->preempt_count = 0;

		memset((void *)mcheckirq_ctx[cpu_nr], 0, THREAD_SIZE);
		tp = mcheckirq_ctx[cpu_nr];
		tp->cpu = cpu_nr;
		tp->preempt_count = HARDIRQ_OFFSET;
#endif
	}
}
#endif
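/*
 * Each CPU gets its own thread_info (and therefore its own stack) for
 * hard-interrupt and softirq processing, separate from the kernel stack
 * of whatever task was interrupted.  handle_one_irq() and
 * do_softirq_onstack() switch onto these stacks so that deep interrupt
 * handling cannot overflow a task's kernel stack; irq_ctx_init() below
 * just zeroes them and seeds cpu/preempt_count.
 */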
struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;

void irq_ctx_init(void)
{
	struct thread_info *tp;
	int i;

	for_each_possible_cpu(i) {
		memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
		tp = softirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = 0;

		memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
		tp = hardirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = HARDIRQ_OFFSET;
	}
}

static inline void do_softirq_onstack(void)
{
	struct thread_info *curtp, *irqtp;
	unsigned long saved_sp_limit = current->thread.ksp_limit;

	curtp = current_thread_info();
	irqtp = softirq_ctx[smp_processor_id()];
	irqtp->task = curtp->task;
	irqtp->flags = 0;
	current->thread.ksp_limit = (unsigned long)irqtp +
				    _ALIGN_UP(sizeof(struct thread_info), 16);
	call_do_softirq(irqtp);
	current->thread.ksp_limit = saved_sp_limit;
	irqtp->task = NULL;

	/* Set any flag that may have been set on the
	 * alternate stack
	 */
	if (irqtp->flags)
		set_bits(irqtp->flags, &curtp->flags);
}

void do_softirq(void)
{
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending())
		do_softirq_onstack();

	local_irq_restore(flags);
}

irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
{
	return d->hwirq;
}
EXPORT_SYMBOL_GPL(irqd_to_hwirq);

irq_hw_number_t virq_to_hw(unsigned int virq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);
	return WARN_ON(!irq_data) ? 0 : irq_data->hwirq;
}
EXPORT_SYMBOL_GPL(virq_to_hw);

#ifdef CONFIG_SMP
int irq_choose_cpu(const struct cpumask *mask)
{
	int cpuid;

	if (cpumask_equal(mask, cpu_all_mask)) {
		static int irq_rover;
		static DEFINE_RAW_SPINLOCK(irq_rover_lock);
		unsigned long flags;

		/* Round-robin distribution... */
do_round_robin:
		raw_spin_lock_irqsave(&irq_rover_lock, flags);

		irq_rover = cpumask_next(irq_rover, cpu_online_mask);
		if (irq_rover >= nr_cpu_ids)
			irq_rover = cpumask_first(cpu_online_mask);

		cpuid = irq_rover;

		raw_spin_unlock_irqrestore(&irq_rover_lock, flags);
	} else {
		cpuid = cpumask_first_and(mask, cpu_online_mask);
		if (cpuid >= nr_cpu_ids)
			goto do_round_robin;
	}

	return get_hard_smp_processor_id(cpuid);
}
#else
int irq_choose_cpu(const struct cpumask *mask)
{
	return hard_smp_processor_id();
}
#endif

int arch_early_irq_init(void)
{
	return 0;
}

#ifdef CONFIG_PPC64
static int __init setup_noirqdistrib(char *str)
{
	distribute_irqs = 0;
	return 1;
}

__setup("noirqdistrib", setup_noirqdistrib);
#endif /* CONFIG_PPC64 */
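/*
 * Note: booting with "noirqdistrib" clears distribute_irqs (declared
 * above for CONFIG_PPC64).  The flag itself is only consumed elsewhere,
 * by the platform interrupt-controller setup code, when it decides
 * whether to spread device interrupts across CPUs or keep them routed
 * to a single CPU.
 */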