author	Thomas Gleixner <tglx@linutronix.de>	2011-05-14 06:06:36 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2011-05-14 06:06:36 -0400
commit	a18f22a968de17b29f2310cdb7ba69163e65ec15 (patch)
tree	a7d56d88fad5e444d7661484109758a2f436129e /arch/mips
parent	a1c57e0fec53defe745e64417eacdbd3618c3e66 (diff)
parent	798778b8653f64b7b2162ac70eca10367cff6ce8 (diff)
Merge branch 'consolidate-clksrc-i8253' of master.kernel.org:~rmk/linux-2.6-arm into timers/clocksource
Conflicts:
	arch/ia64/kernel/cyclone.c
	arch/mips/kernel/i8253.c
	arch/x86/kernel/i8253.c

Reason: Resolve conflicts so further cleanups do not conflict further

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/mips')
-rw-r--r-- arch/mips/Kconfig | 25
-rw-r--r-- arch/mips/Makefile | 8
-rw-r--r-- arch/mips/alchemy/common/clocks.c | 2
-rw-r--r-- arch/mips/alchemy/common/irq.c | 98
-rw-r--r-- arch/mips/alchemy/devboards/bcsr.c | 24
-rw-r--r-- arch/mips/alchemy/devboards/db1200/setup.c | 15
-rw-r--r-- arch/mips/alchemy/devboards/db1x00/board_setup.c | 50
-rw-r--r-- arch/mips/alchemy/devboards/pb1000/board_setup.c | 2
-rw-r--r-- arch/mips/alchemy/devboards/pb1100/board_setup.c | 8
-rw-r--r-- arch/mips/alchemy/devboards/pb1200/board_setup.c | 2
-rw-r--r-- arch/mips/alchemy/devboards/pb1500/board_setup.c | 16
-rw-r--r-- arch/mips/alchemy/devboards/pb1550/board_setup.c | 6
-rw-r--r-- arch/mips/alchemy/mtx-1/board_setup.c | 14
-rw-r--r-- arch/mips/alchemy/mtx-1/platform.c | 9
-rw-r--r-- arch/mips/alchemy/xxs1500/board_setup.c | 28
-rw-r--r-- arch/mips/ar7/irq.c | 46
-rw-r--r-- arch/mips/ath79/irq.c | 28
-rw-r--r-- arch/mips/bcm63xx/boards/Makefile | 2
-rw-r--r-- arch/mips/bcm63xx/irq.c | 81
-rw-r--r-- arch/mips/cavium-octeon/executive/octeon-model.c | 2
-rw-r--r-- arch/mips/cavium-octeon/octeon-irq.c | 1389
-rw-r--r-- arch/mips/cavium-octeon/octeon-platform.c | 2
-rw-r--r-- arch/mips/cavium-octeon/setup.c | 14
-rw-r--r-- arch/mips/cavium-octeon/smp.c | 39
-rw-r--r-- arch/mips/dec/ioasic-irq.c | 64
-rw-r--r-- arch/mips/dec/kn02-irq.c | 25
-rw-r--r-- arch/mips/emma/markeins/irq.c | 73
-rw-r--r-- arch/mips/fw/arc/Makefile | 2
-rw-r--r-- arch/mips/fw/arc/promlib.c | 2
-rw-r--r-- arch/mips/include/asm/bitops.h | 3
-rw-r--r-- arch/mips/include/asm/dec/prom.h | 2
-rw-r--r-- arch/mips/include/asm/errno.h | 2
-rw-r--r-- arch/mips/include/asm/floppy.h | 2
-rw-r--r-- arch/mips/include/asm/futex.h | 39
-rw-r--r-- arch/mips/include/asm/hw_irq.h | 2
-rw-r--r-- arch/mips/include/asm/i8253.h | 5
-rw-r--r-- arch/mips/include/asm/io.h | 2
-rw-r--r-- arch/mips/include/asm/ioctls.h | 1
-rw-r--r-- arch/mips/include/asm/irq.h | 64
-rw-r--r-- arch/mips/include/asm/irqflags.h | 2
-rw-r--r-- arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h | 2
-rw-r--r-- arch/mips/include/asm/mach-cavium-octeon/irq.h | 243
-rw-r--r-- arch/mips/include/asm/mach-ip32/mc146818rtc.h | 2
-rw-r--r-- arch/mips/include/asm/mach-jz4740/platform.h | 1
-rw-r--r-- arch/mips/include/asm/mach-loongson/cs5536/cs5536.h | 2
-rw-r--r-- arch/mips/include/asm/mach-pb1x00/pb1000.h | 2
-rw-r--r-- arch/mips/include/asm/mach-pb1x00/pb1200.h | 2
-rw-r--r-- arch/mips/include/asm/mach-pb1x00/pb1550.h | 2
-rw-r--r-- arch/mips/include/asm/mach-powertv/dma-coherence.h | 2
-rw-r--r-- arch/mips/include/asm/mipsregs.h | 4
-rw-r--r-- arch/mips/include/asm/octeon/cvmx-bootinfo.h | 2
-rw-r--r-- arch/mips/include/asm/octeon/cvmx-bootmem.h | 2
-rw-r--r-- arch/mips/include/asm/octeon/cvmx-l2c.h | 2
-rw-r--r-- arch/mips/include/asm/octeon/cvmx.h | 2
-rw-r--r-- arch/mips/include/asm/octeon/octeon.h | 2
-rw-r--r-- arch/mips/include/asm/paccess.h | 2
-rw-r--r-- arch/mips/include/asm/pci/bridge.h | 2
-rw-r--r-- arch/mips/include/asm/perf_event.h | 12
-rw-r--r-- arch/mips/include/asm/pmc-sierra/msp71xx/cpu-feature-overrides.h | 21
-rw-r--r-- arch/mips/include/asm/pmc-sierra/msp71xx/msp_gpio_macros.h | 343
-rw-r--r-- arch/mips/include/asm/pmc-sierra/msp71xx/msp_regops.h | 2
-rw-r--r-- arch/mips/include/asm/pmc-sierra/msp71xx/msp_regs.h | 17
-rw-r--r-- arch/mips/include/asm/pmc-sierra/msp71xx/msp_usb.h | 144
-rw-r--r-- arch/mips/include/asm/processor.h | 2
-rw-r--r-- arch/mips/include/asm/sgi/ioc.h | 2
-rw-r--r-- arch/mips/include/asm/sibyte/sb1250_mac.h | 4
-rw-r--r-- arch/mips/include/asm/siginfo.h | 2
-rw-r--r-- arch/mips/include/asm/sn/klconfig.h | 4
-rw-r--r-- arch/mips/include/asm/sn/sn0/hubio.h | 2
-rw-r--r-- arch/mips/include/asm/spinlock.h | 22
-rw-r--r-- arch/mips/include/asm/stackframe.h | 2
-rw-r--r-- arch/mips/include/asm/thread_info.h | 6
-rw-r--r-- arch/mips/include/asm/types.h | 8
-rw-r--r-- arch/mips/include/asm/unistd.h | 24
-rw-r--r-- arch/mips/include/asm/war.h | 2
-rw-r--r-- arch/mips/jazz/irq.c | 16
-rw-r--r-- arch/mips/jz4740/Makefile | 2
-rw-r--r-- arch/mips/jz4740/board-qi_lb60.c | 36
-rw-r--r-- arch/mips/jz4740/gpio.c | 121
-rw-r--r-- arch/mips/jz4740/irq.c | 34
-rw-r--r-- arch/mips/jz4740/platform.c | 16
-rw-r--r-- arch/mips/kernel/cpu-bugs64.c | 2
-rw-r--r-- arch/mips/kernel/ftrace.c | 179
-rw-r--r-- arch/mips/kernel/i8253.c | 75
-rw-r--r-- arch/mips/kernel/i8259.c | 43
-rw-r--r-- arch/mips/kernel/irq-gic.c | 45
-rw-r--r-- arch/mips/kernel/irq-gt641xx.c | 30
-rw-r--r-- arch/mips/kernel/irq-msc01.c | 63
-rw-r--r-- arch/mips/kernel/irq-rm7000.c | 20
-rw-r--r-- arch/mips/kernel/irq-rm9000.c | 53
-rw-r--r-- arch/mips/kernel/irq.c | 51
-rw-r--r-- arch/mips/kernel/irq_cpu.c | 50
-rw-r--r-- arch/mips/kernel/irq_txx9.c | 32
-rw-r--r-- arch/mips/kernel/perf_event.c | 345
-rw-r--r-- arch/mips/kernel/perf_event_mipsxx.c | 6
-rw-r--r-- arch/mips/kernel/process.c | 2
-rw-r--r-- arch/mips/kernel/scall32-o32.S | 4
-rw-r--r-- arch/mips/kernel/scall64-64.S | 4
-rw-r--r-- arch/mips/kernel/scall64-n32.S | 4
-rw-r--r-- arch/mips/kernel/scall64-o32.S | 4
-rw-r--r-- arch/mips/kernel/signal.c | 2
-rw-r--r-- arch/mips/kernel/signal32.c | 2
-rw-r--r-- arch/mips/kernel/smp-mt.c | 2
-rw-r--r-- arch/mips/kernel/smp.c | 31
-rw-r--r-- arch/mips/kernel/smtc.c | 15
-rw-r--r-- arch/mips/kernel/syscall.c | 5
-rw-r--r-- arch/mips/kernel/time.c | 2
-rw-r--r-- arch/mips/kernel/vmlinux.lds.S | 2
-rw-r--r-- arch/mips/kernel/vpe.c | 6
-rw-r--r-- arch/mips/lasat/interrupt.c | 18
-rw-r--r-- arch/mips/lib/strnlen_user.S | 2
-rw-r--r-- arch/mips/loongson/Kconfig | 5
-rw-r--r-- arch/mips/loongson/common/bonito-irq.c | 19
-rw-r--r-- arch/mips/loongson/common/cmdline.c | 5
-rw-r--r-- arch/mips/loongson/common/machtype.c | 3
-rw-r--r-- arch/mips/math-emu/dp_fsp.c | 2
-rw-r--r-- arch/mips/math-emu/dp_mul.c | 2
-rw-r--r-- arch/mips/math-emu/dsemul.c | 2
-rw-r--r-- arch/mips/math-emu/ieee754int.h | 4
-rw-r--r-- arch/mips/math-emu/sp_mul.c | 2
-rw-r--r-- arch/mips/mipssim/sim_smtc.c | 3
-rw-r--r-- arch/mips/mm/cex-sb1.S | 2
-rw-r--r-- arch/mips/mm/init.c | 2
-rw-r--r-- arch/mips/mm/tlbex.c | 4
-rw-r--r-- arch/mips/mti-malta/malta-int.c | 2
-rw-r--r-- arch/mips/mti-malta/malta-smtc.c | 12
-rw-r--r-- arch/mips/mti-malta/malta-time.c | 2
-rw-r--r-- arch/mips/oprofile/Makefile | 2
-rw-r--r-- arch/mips/pci/msi-octeon.c | 24
-rw-r--r-- arch/mips/pci/ops-pmcmsp.c | 8
-rw-r--r-- arch/mips/pci/pci-bcm1480.c | 2
-rw-r--r-- arch/mips/pci/pci-octeon.c | 4
-rw-r--r-- arch/mips/pci/pci.c | 2
-rw-r--r-- arch/mips/pmc-sierra/Kconfig | 19
-rw-r--r-- arch/mips/pmc-sierra/msp71xx/Makefile | 8
-rw-r--r-- arch/mips/pmc-sierra/msp71xx/msp_eth.c | 187
-rw-r--r-- arch/mips/pmc-sierra/msp71xx/msp_irq.c | 56
-rw-r--r-- arch/mips/pmc-sierra/msp71xx/msp_irq_cic.c | 241
-rw-r--r-- arch/mips/pmc-sierra/msp71xx/msp_irq_per.c | 135
-rw-r--r-- arch/mips/pmc-sierra/msp71xx/msp_irq_slp.c | 20
-rw-r--r-- arch/mips/pmc-sierra/msp71xx/msp_setup.c | 12
-rw-r--r-- arch/mips/pmc-sierra/msp71xx/msp_smp.c | 77
-rw-r--r-- arch/mips/pmc-sierra/msp71xx/msp_smtc.c | 105
-rw-r--r-- arch/mips/pmc-sierra/msp71xx/msp_time.c | 18
-rw-r--r-- arch/mips/pmc-sierra/msp71xx/msp_usb.c | 239
-rw-r--r-- arch/mips/pmc-sierra/yosemite/Makefile | 2
-rw-r--r-- arch/mips/pnx833x/common/interrupts.c | 104
-rw-r--r-- arch/mips/pnx833x/common/platform.c | 2
-rw-r--r-- arch/mips/pnx8550/common/int.c | 28
-rw-r--r-- arch/mips/powertv/Makefile | 2
-rw-r--r-- arch/mips/powertv/asic/Makefile | 2
-rw-r--r-- arch/mips/powertv/asic/irq_asic.c | 15
-rw-r--r-- arch/mips/powertv/pci/Makefile | 2
-rw-r--r-- arch/mips/rb532/irq.c | 36
-rw-r--r-- arch/mips/sgi-ip22/ip22-int.c | 62
-rw-r--r-- arch/mips/sgi-ip27/Kconfig | 2
-rw-r--r-- arch/mips/sgi-ip27/TODO | 2
-rw-r--r-- arch/mips/sgi-ip27/ip27-init.c | 2
-rw-r--r-- arch/mips/sgi-ip27/ip27-irq.c | 42
-rw-r--r-- arch/mips/sgi-ip27/ip27-timer.c | 13
-rw-r--r-- arch/mips/sgi-ip32/ip32-irq.c | 174
-rw-r--r-- arch/mips/sibyte/bcm1480/irq.c | 58
-rw-r--r-- arch/mips/sibyte/sb1250/irq.c | 56
-rw-r--r-- arch/mips/sni/a20r.c | 25
-rw-r--r-- arch/mips/sni/pcimt.c | 23
-rw-r--r-- arch/mips/sni/pcit.c | 25
-rw-r--r-- arch/mips/sni/rm200.c | 46
-rw-r--r-- arch/mips/txx9/generic/irq_tx4927.c | 2
-rw-r--r-- arch/mips/txx9/generic/irq_tx4938.c | 2
-rw-r--r-- arch/mips/txx9/generic/irq_tx4939.c | 34
-rw-r--r-- arch/mips/txx9/jmr3927/irq.c | 19
-rw-r--r-- arch/mips/txx9/rbtx4927/irq.c | 60
-rw-r--r-- arch/mips/txx9/rbtx4938/irq.c | 56
-rw-r--r-- arch/mips/txx9/rbtx4939/irq.c | 18
-rw-r--r-- arch/mips/vr41xx/common/icu.c | 76
-rw-r--r-- arch/mips/vr41xx/common/irq.c | 19
176 files changed, 3861 insertions, 2797 deletions
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index f5ecc0566bc..f7f6419e4c8 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -4,6 +4,7 @@ config MIPS
4 select HAVE_GENERIC_DMA_COHERENT 4 select HAVE_GENERIC_DMA_COHERENT
5 select HAVE_IDE 5 select HAVE_IDE
6 select HAVE_OPROFILE 6 select HAVE_OPROFILE
7 select HAVE_IRQ_WORK
7 select HAVE_PERF_EVENTS 8 select HAVE_PERF_EVENTS
8 select PERF_USE_VMALLOC 9 select PERF_USE_VMALLOC
9 select HAVE_ARCH_KGDB 10 select HAVE_ARCH_KGDB
@@ -21,6 +22,7 @@ config MIPS
21 select HAVE_DMA_API_DEBUG 22 select HAVE_DMA_API_DEBUG
22 select HAVE_GENERIC_HARDIRQS 23 select HAVE_GENERIC_HARDIRQS
23 select GENERIC_IRQ_PROBE 24 select GENERIC_IRQ_PROBE
25 select GENERIC_IRQ_SHOW
24 select HAVE_ARCH_JUMP_LABEL 26 select HAVE_ARCH_JUMP_LABEL
25 27
26menu "Machine selection" 28menu "Machine selection"
@@ -208,6 +210,7 @@ config MACH_JZ4740
208 select ARCH_REQUIRE_GPIOLIB 210 select ARCH_REQUIRE_GPIOLIB
209 select SYS_HAS_EARLY_PRINTK 211 select SYS_HAS_EARLY_PRINTK
210 select HAVE_PWM 212 select HAVE_PWM
213 select HAVE_CLK
211 214
212config LASAT 215config LASAT
213 bool "LASAT Networks platforms" 216 bool "LASAT Networks platforms"
@@ -333,6 +336,8 @@ config PNX8550_STB810
333config PMC_MSP 336config PMC_MSP
334 bool "PMC-Sierra MSP chipsets" 337 bool "PMC-Sierra MSP chipsets"
335 depends on EXPERIMENTAL 338 depends on EXPERIMENTAL
339 select CEVT_R4K
340 select CSRC_R4K
336 select DMA_NONCOHERENT 341 select DMA_NONCOHERENT
337 select SWAP_IO_SPACE 342 select SWAP_IO_SPACE
338 select NO_EXCEPT_FILL 343 select NO_EXCEPT_FILL
@@ -773,6 +778,10 @@ config GENERIC_FIND_NEXT_BIT
773 bool 778 bool
774 default y 779 default y
775 780
781config GENERIC_FIND_BIT_LE
782 bool
783 default y
784
776config GENERIC_HWEIGHT 785config GENERIC_HWEIGHT
777 bool 786 bool
778 default y 787 default y
@@ -854,6 +863,9 @@ config GPIO_TXX9
854config CFE 863config CFE
855 bool 864 bool
856 865
866config ARCH_DMA_ADDR_T_64BIT
867 def_bool (HIGHMEM && 64BIT_PHYS_ADDR) || 64BIT
868
857config DMA_COHERENT 869config DMA_COHERENT
858 bool 870 bool
859 871
@@ -1123,7 +1135,7 @@ config CPU_LOONGSON2E
1123 The Loongson 2E processor implements the MIPS III instruction set 1135 The Loongson 2E processor implements the MIPS III instruction set
1124 with many extensions. 1136 with many extensions.
1125 1137
1126 It has an internal FPGA northbridge, which is compatiable to 1138 It has an internal FPGA northbridge, which is compatible to
1127 bonito64. 1139 bonito64.
1128 1140
1129config CPU_LOONGSON2F 1141config CPU_LOONGSON2F
@@ -2327,6 +2339,7 @@ config MMU
2327 2339
2328config I8253 2340config I8253
2329 bool 2341 bool
2342 select CLKSRC_I8253
2330 select MIPS_EXTERNAL_TIMER 2343 select MIPS_EXTERNAL_TIMER
2331 2344
2332config ZONE_DMA32 2345config ZONE_DMA32
@@ -2336,6 +2349,16 @@ source "drivers/pcmcia/Kconfig"
2336 2349
2337source "drivers/pci/hotplug/Kconfig" 2350source "drivers/pci/hotplug/Kconfig"
2338 2351
2352config RAPIDIO
2353 bool "RapidIO support"
2354 depends on PCI
2355 default n
2356 help
2357 If you say Y here, the kernel will include drivers and
2358 infrastructure code to support RapidIO interconnect devices.
2359
2360source "drivers/rapidio/Kconfig"
2361
2339endmenu 2362endmenu
2340 2363
2341menu "Executable file formats" 2364menu "Executable file formats"
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index 7c1102e41fe..53e3514ba10 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -101,7 +101,7 @@ cflags-y += -ffreestanding
101# carefully avoid to add it redundantly because gcc 3.3/3.4 complains 101# carefully avoid to add it redundantly because gcc 3.3/3.4 complains
102# when fed the toolchain default! 102# when fed the toolchain default!
103# 103#
104# Certain gcc versions upto gcc 4.1.1 (probably 4.2-subversion as of 104# Certain gcc versions up to gcc 4.1.1 (probably 4.2-subversion as of
105# 2006-10-10 don't properly change the predefined symbols if -EB / -EL 105# 2006-10-10 don't properly change the predefined symbols if -EB / -EL
106# are used, so we kludge that here. A bug has been filed at 106# are used, so we kludge that here. A bug has been filed at
107# http://gcc.gnu.org/bugzilla/show_bug.cgi?id=29413. 107# http://gcc.gnu.org/bugzilla/show_bug.cgi?id=29413.
@@ -286,11 +286,11 @@ CLEAN_FILES += vmlinux.32 vmlinux.64
286archprepare: 286archprepare:
287ifdef CONFIG_MIPS32_N32 287ifdef CONFIG_MIPS32_N32
288 @echo ' Checking missing-syscalls for N32' 288 @echo ' Checking missing-syscalls for N32'
289 $(Q)$(MAKE) $(build)=. missing-syscalls EXTRA_CFLAGS="-mabi=n32" 289 $(Q)$(MAKE) $(build)=. missing-syscalls ccflags-y="-mabi=n32"
290endif 290endif
291ifdef CONFIG_MIPS32_O32 291ifdef CONFIG_MIPS32_O32
292 @echo ' Checking missing-syscalls for O32' 292 @echo ' Checking missing-syscalls for O32'
293 $(Q)$(MAKE) $(build)=. missing-syscalls EXTRA_CFLAGS="-mabi=32" 293 $(Q)$(MAKE) $(build)=. missing-syscalls ccflags-y="-mabi=32"
294endif 294endif
295 295
296install: 296install:
@@ -314,5 +314,5 @@ define archhelp
314 echo ' vmlinuz.bin - Raw binary zboot image' 314 echo ' vmlinuz.bin - Raw binary zboot image'
315 echo ' vmlinuz.srec - SREC zboot image' 315 echo ' vmlinuz.srec - SREC zboot image'
316 echo 316 echo
317 echo ' These will be default as apropriate for a configured platform.' 317 echo ' These will be default as appropriate for a configured platform.'
318endef 318endef
diff --git a/arch/mips/alchemy/common/clocks.c b/arch/mips/alchemy/common/clocks.c
index af0fe41055a..f38298a8b98 100644
--- a/arch/mips/alchemy/common/clocks.c
+++ b/arch/mips/alchemy/common/clocks.c
@@ -75,7 +75,7 @@ void set_au1x00_uart_baud_base(unsigned long new_baud_base)
75 * counter, if it exists. If we don't have an accurate processor 75 * counter, if it exists. If we don't have an accurate processor
76 * speed, all of the peripherals that derive their clocks based on 76 * speed, all of the peripherals that derive their clocks based on
77 * this advertised speed will introduce error and sometimes not work 77 * this advertised speed will introduce error and sometimes not work
78 * properly. This function is futher convoluted to still allow configurations 78 * properly. This function is further convoluted to still allow configurations
79 * to do that in case they have really, really old silicon with a 79 * to do that in case they have really, really old silicon with a
80 * write-only PLL register. -- Dan 80 * write-only PLL register. -- Dan
81 */ 81 */
diff --git a/arch/mips/alchemy/common/irq.c b/arch/mips/alchemy/common/irq.c
index 9f78ada83b3..55dd7c88851 100644
--- a/arch/mips/alchemy/common/irq.c
+++ b/arch/mips/alchemy/common/irq.c
@@ -39,7 +39,7 @@
39#include <asm/mach-pb1x00/pb1000.h> 39#include <asm/mach-pb1x00/pb1000.h>
40#endif 40#endif
41 41
42static int au1x_ic_settype(unsigned int irq, unsigned int flow_type); 42static int au1x_ic_settype(struct irq_data *d, unsigned int flow_type);
43 43
44/* NOTE on interrupt priorities: The original writers of this code said: 44/* NOTE on interrupt priorities: The original writers of this code said:
45 * 45 *
@@ -218,17 +218,17 @@ struct au1xxx_irqmap au1200_irqmap[] __initdata = {
218}; 218};
219 219
220 220
221static void au1x_ic0_unmask(unsigned int irq_nr) 221static void au1x_ic0_unmask(struct irq_data *d)
222{ 222{
223 unsigned int bit = irq_nr - AU1000_INTC0_INT_BASE; 223 unsigned int bit = d->irq - AU1000_INTC0_INT_BASE;
224 au_writel(1 << bit, IC0_MASKSET); 224 au_writel(1 << bit, IC0_MASKSET);
225 au_writel(1 << bit, IC0_WAKESET); 225 au_writel(1 << bit, IC0_WAKESET);
226 au_sync(); 226 au_sync();
227} 227}
228 228
229static void au1x_ic1_unmask(unsigned int irq_nr) 229static void au1x_ic1_unmask(struct irq_data *d)
230{ 230{
231 unsigned int bit = irq_nr - AU1000_INTC1_INT_BASE; 231 unsigned int bit = d->irq - AU1000_INTC1_INT_BASE;
232 au_writel(1 << bit, IC1_MASKSET); 232 au_writel(1 << bit, IC1_MASKSET);
233 au_writel(1 << bit, IC1_WAKESET); 233 au_writel(1 << bit, IC1_WAKESET);
234 234
@@ -236,31 +236,31 @@ static void au1x_ic1_unmask(unsigned int irq_nr)
236 * nowhere in the current kernel sources is it disabled. --mlau 236 * nowhere in the current kernel sources is it disabled. --mlau
237 */ 237 */
238#if defined(CONFIG_MIPS_PB1000) 238#if defined(CONFIG_MIPS_PB1000)
239 if (irq_nr == AU1000_GPIO15_INT) 239 if (d->irq == AU1000_GPIO15_INT)
240 au_writel(0x4000, PB1000_MDR); /* enable int */ 240 au_writel(0x4000, PB1000_MDR); /* enable int */
241#endif 241#endif
242 au_sync(); 242 au_sync();
243} 243}
244 244
245static void au1x_ic0_mask(unsigned int irq_nr) 245static void au1x_ic0_mask(struct irq_data *d)
246{ 246{
247 unsigned int bit = irq_nr - AU1000_INTC0_INT_BASE; 247 unsigned int bit = d->irq - AU1000_INTC0_INT_BASE;
248 au_writel(1 << bit, IC0_MASKCLR); 248 au_writel(1 << bit, IC0_MASKCLR);
249 au_writel(1 << bit, IC0_WAKECLR); 249 au_writel(1 << bit, IC0_WAKECLR);
250 au_sync(); 250 au_sync();
251} 251}
252 252
253static void au1x_ic1_mask(unsigned int irq_nr) 253static void au1x_ic1_mask(struct irq_data *d)
254{ 254{
255 unsigned int bit = irq_nr - AU1000_INTC1_INT_BASE; 255 unsigned int bit = d->irq - AU1000_INTC1_INT_BASE;
256 au_writel(1 << bit, IC1_MASKCLR); 256 au_writel(1 << bit, IC1_MASKCLR);
257 au_writel(1 << bit, IC1_WAKECLR); 257 au_writel(1 << bit, IC1_WAKECLR);
258 au_sync(); 258 au_sync();
259} 259}
260 260
261static void au1x_ic0_ack(unsigned int irq_nr) 261static void au1x_ic0_ack(struct irq_data *d)
262{ 262{
263 unsigned int bit = irq_nr - AU1000_INTC0_INT_BASE; 263 unsigned int bit = d->irq - AU1000_INTC0_INT_BASE;
264 264
265 /* 265 /*
266 * This may assume that we don't get interrupts from 266 * This may assume that we don't get interrupts from
@@ -271,9 +271,9 @@ static void au1x_ic0_ack(unsigned int irq_nr)
271 au_sync(); 271 au_sync();
272} 272}
273 273
274static void au1x_ic1_ack(unsigned int irq_nr) 274static void au1x_ic1_ack(struct irq_data *d)
275{ 275{
276 unsigned int bit = irq_nr - AU1000_INTC1_INT_BASE; 276 unsigned int bit = d->irq - AU1000_INTC1_INT_BASE;
277 277
278 /* 278 /*
279 * This may assume that we don't get interrupts from 279 * This may assume that we don't get interrupts from
@@ -284,9 +284,9 @@ static void au1x_ic1_ack(unsigned int irq_nr)
284 au_sync(); 284 au_sync();
285} 285}
286 286
287static void au1x_ic0_maskack(unsigned int irq_nr) 287static void au1x_ic0_maskack(struct irq_data *d)
288{ 288{
289 unsigned int bit = irq_nr - AU1000_INTC0_INT_BASE; 289 unsigned int bit = d->irq - AU1000_INTC0_INT_BASE;
290 290
291 au_writel(1 << bit, IC0_WAKECLR); 291 au_writel(1 << bit, IC0_WAKECLR);
292 au_writel(1 << bit, IC0_MASKCLR); 292 au_writel(1 << bit, IC0_MASKCLR);
@@ -295,9 +295,9 @@ static void au1x_ic0_maskack(unsigned int irq_nr)
295 au_sync(); 295 au_sync();
296} 296}
297 297
298static void au1x_ic1_maskack(unsigned int irq_nr) 298static void au1x_ic1_maskack(struct irq_data *d)
299{ 299{
300 unsigned int bit = irq_nr - AU1000_INTC1_INT_BASE; 300 unsigned int bit = d->irq - AU1000_INTC1_INT_BASE;
301 301
302 au_writel(1 << bit, IC1_WAKECLR); 302 au_writel(1 << bit, IC1_WAKECLR);
303 au_writel(1 << bit, IC1_MASKCLR); 303 au_writel(1 << bit, IC1_MASKCLR);
@@ -306,9 +306,9 @@ static void au1x_ic1_maskack(unsigned int irq_nr)
306 au_sync(); 306 au_sync();
307} 307}
308 308
309static int au1x_ic1_setwake(unsigned int irq, unsigned int on) 309static int au1x_ic1_setwake(struct irq_data *d, unsigned int on)
310{ 310{
311 int bit = irq - AU1000_INTC1_INT_BASE; 311 int bit = d->irq - AU1000_INTC1_INT_BASE;
312 unsigned long wakemsk, flags; 312 unsigned long wakemsk, flags;
313 313
314 /* only GPIO 0-7 can act as wakeup source. Fortunately these 314 /* only GPIO 0-7 can act as wakeup source. Fortunately these
@@ -336,28 +336,30 @@ static int au1x_ic1_setwake(unsigned int irq, unsigned int on)
336 */ 336 */
337static struct irq_chip au1x_ic0_chip = { 337static struct irq_chip au1x_ic0_chip = {
338 .name = "Alchemy-IC0", 338 .name = "Alchemy-IC0",
339 .ack = au1x_ic0_ack, 339 .irq_ack = au1x_ic0_ack,
340 .mask = au1x_ic0_mask, 340 .irq_mask = au1x_ic0_mask,
341 .mask_ack = au1x_ic0_maskack, 341 .irq_mask_ack = au1x_ic0_maskack,
342 .unmask = au1x_ic0_unmask, 342 .irq_unmask = au1x_ic0_unmask,
343 .set_type = au1x_ic_settype, 343 .irq_set_type = au1x_ic_settype,
344}; 344};
345 345
346static struct irq_chip au1x_ic1_chip = { 346static struct irq_chip au1x_ic1_chip = {
347 .name = "Alchemy-IC1", 347 .name = "Alchemy-IC1",
348 .ack = au1x_ic1_ack, 348 .irq_ack = au1x_ic1_ack,
349 .mask = au1x_ic1_mask, 349 .irq_mask = au1x_ic1_mask,
350 .mask_ack = au1x_ic1_maskack, 350 .irq_mask_ack = au1x_ic1_maskack,
351 .unmask = au1x_ic1_unmask, 351 .irq_unmask = au1x_ic1_unmask,
352 .set_type = au1x_ic_settype, 352 .irq_set_type = au1x_ic_settype,
353 .set_wake = au1x_ic1_setwake, 353 .irq_set_wake = au1x_ic1_setwake,
354}; 354};
355 355
356static int au1x_ic_settype(unsigned int irq, unsigned int flow_type) 356static int au1x_ic_settype(struct irq_data *d, unsigned int flow_type)
357{ 357{
358 struct irq_chip *chip; 358 struct irq_chip *chip;
359 unsigned long icr[6]; 359 unsigned long icr[6];
360 unsigned int bit, ic; 360 unsigned int bit, ic, irq = d->irq;
361 irq_flow_handler_t handler = NULL;
362 unsigned char *name = NULL;
361 int ret; 363 int ret;
362 364
363 if (irq >= AU1000_INTC1_INT_BASE) { 365 if (irq >= AU1000_INTC1_INT_BASE) {
@@ -387,47 +389,47 @@ static int au1x_ic_settype(unsigned int irq, unsigned int flow_type)
387 au_writel(1 << bit, icr[5]); 389 au_writel(1 << bit, icr[5]);
388 au_writel(1 << bit, icr[4]); 390 au_writel(1 << bit, icr[4]);
389 au_writel(1 << bit, icr[0]); 391 au_writel(1 << bit, icr[0]);
390 set_irq_chip_and_handler_name(irq, chip, 392 handler = handle_edge_irq;
391 handle_edge_irq, "riseedge"); 393 name = "riseedge";
392 break; 394 break;
393 case IRQ_TYPE_EDGE_FALLING: /* 0:1:0 */ 395 case IRQ_TYPE_EDGE_FALLING: /* 0:1:0 */
394 au_writel(1 << bit, icr[5]); 396 au_writel(1 << bit, icr[5]);
395 au_writel(1 << bit, icr[1]); 397 au_writel(1 << bit, icr[1]);
396 au_writel(1 << bit, icr[3]); 398 au_writel(1 << bit, icr[3]);
397 set_irq_chip_and_handler_name(irq, chip, 399 handler = handle_edge_irq;
398 handle_edge_irq, "falledge"); 400 name = "falledge";
399 break; 401 break;
400 case IRQ_TYPE_EDGE_BOTH: /* 0:1:1 */ 402 case IRQ_TYPE_EDGE_BOTH: /* 0:1:1 */
401 au_writel(1 << bit, icr[5]); 403 au_writel(1 << bit, icr[5]);
402 au_writel(1 << bit, icr[1]); 404 au_writel(1 << bit, icr[1]);
403 au_writel(1 << bit, icr[0]); 405 au_writel(1 << bit, icr[0]);
404 set_irq_chip_and_handler_name(irq, chip, 406 handler = handle_edge_irq;
405 handle_edge_irq, "bothedge"); 407 name = "bothedge";
406 break; 408 break;
407 case IRQ_TYPE_LEVEL_HIGH: /* 1:0:1 */ 409 case IRQ_TYPE_LEVEL_HIGH: /* 1:0:1 */
408 au_writel(1 << bit, icr[2]); 410 au_writel(1 << bit, icr[2]);
409 au_writel(1 << bit, icr[4]); 411 au_writel(1 << bit, icr[4]);
410 au_writel(1 << bit, icr[0]); 412 au_writel(1 << bit, icr[0]);
411 set_irq_chip_and_handler_name(irq, chip, 413 handler = handle_level_irq;
412 handle_level_irq, "hilevel"); 414 name = "hilevel";
413 break; 415 break;
414 case IRQ_TYPE_LEVEL_LOW: /* 1:1:0 */ 416 case IRQ_TYPE_LEVEL_LOW: /* 1:1:0 */
415 au_writel(1 << bit, icr[2]); 417 au_writel(1 << bit, icr[2]);
416 au_writel(1 << bit, icr[1]); 418 au_writel(1 << bit, icr[1]);
417 au_writel(1 << bit, icr[3]); 419 au_writel(1 << bit, icr[3]);
418 set_irq_chip_and_handler_name(irq, chip, 420 handler = handle_level_irq;
419 handle_level_irq, "lowlevel"); 421 name = "lowlevel";
420 break; 422 break;
421 case IRQ_TYPE_NONE: /* 0:0:0 */ 423 case IRQ_TYPE_NONE: /* 0:0:0 */
422 au_writel(1 << bit, icr[5]); 424 au_writel(1 << bit, icr[5]);
423 au_writel(1 << bit, icr[4]); 425 au_writel(1 << bit, icr[4]);
424 au_writel(1 << bit, icr[3]); 426 au_writel(1 << bit, icr[3]);
425 /* set at least chip so we can call set_irq_type() on it */
426 set_irq_chip(irq, chip);
427 break; 427 break;
428 default: 428 default:
429 ret = -EINVAL; 429 ret = -EINVAL;
430 } 430 }
431 __irq_set_chip_handler_name_locked(d->irq, chip, handler, name);
432
431 au_sync(); 433 au_sync();
432 434
433 return ret; 435 return ret;
@@ -504,11 +506,11 @@ static void __init au1000_init_irq(struct au1xxx_irqmap *map)
504 */ 506 */
505 for (i = AU1000_INTC0_INT_BASE; 507 for (i = AU1000_INTC0_INT_BASE;
506 (i < AU1000_INTC0_INT_BASE + 32); i++) 508 (i < AU1000_INTC0_INT_BASE + 32); i++)
507 au1x_ic_settype(i, IRQ_TYPE_NONE); 509 au1x_ic_settype(irq_get_irq_data(i), IRQ_TYPE_NONE);
508 510
509 for (i = AU1000_INTC1_INT_BASE; 511 for (i = AU1000_INTC1_INT_BASE;
510 (i < AU1000_INTC1_INT_BASE + 32); i++) 512 (i < AU1000_INTC1_INT_BASE + 32); i++)
511 au1x_ic_settype(i, IRQ_TYPE_NONE); 513 au1x_ic_settype(irq_get_irq_data(i), IRQ_TYPE_NONE);
512 514
513 /* 515 /*
514 * Initialize IC0, which is fixed per processor. 516 * Initialize IC0, which is fixed per processor.
@@ -526,7 +528,7 @@ static void __init au1000_init_irq(struct au1xxx_irqmap *map)
526 au_writel(1 << bit, IC0_ASSIGNSET); 528 au_writel(1 << bit, IC0_ASSIGNSET);
527 } 529 }
528 530
529 au1x_ic_settype(irq_nr, map->im_type); 531 au1x_ic_settype(irq_get_irq_data(irq_nr), map->im_type);
530 ++map; 532 ++map;
531 } 533 }
532 534
diff --git a/arch/mips/alchemy/devboards/bcsr.c b/arch/mips/alchemy/devboards/bcsr.c
index c52af8821da..596ad00e7f0 100644
--- a/arch/mips/alchemy/devboards/bcsr.c
+++ b/arch/mips/alchemy/devboards/bcsr.c
@@ -97,26 +97,26 @@ static void bcsr_csc_handler(unsigned int irq, struct irq_desc *d)
97 * CPLD generates tons of spurious interrupts (at least on my DB1200). 97 * CPLD generates tons of spurious interrupts (at least on my DB1200).
98 * -- mlau 98 * -- mlau
99 */ 99 */
100static void bcsr_irq_mask(unsigned int irq_nr) 100static void bcsr_irq_mask(struct irq_data *d)
101{ 101{
102 unsigned short v = 1 << (irq_nr - bcsr_csc_base); 102 unsigned short v = 1 << (d->irq - bcsr_csc_base);
103 __raw_writew(v, bcsr_virt + BCSR_REG_INTCLR); 103 __raw_writew(v, bcsr_virt + BCSR_REG_INTCLR);
104 __raw_writew(v, bcsr_virt + BCSR_REG_MASKCLR); 104 __raw_writew(v, bcsr_virt + BCSR_REG_MASKCLR);
105 wmb(); 105 wmb();
106} 106}
107 107
108static void bcsr_irq_maskack(unsigned int irq_nr) 108static void bcsr_irq_maskack(struct irq_data *d)
109{ 109{
110 unsigned short v = 1 << (irq_nr - bcsr_csc_base); 110 unsigned short v = 1 << (d->irq - bcsr_csc_base);
111 __raw_writew(v, bcsr_virt + BCSR_REG_INTCLR); 111 __raw_writew(v, bcsr_virt + BCSR_REG_INTCLR);
112 __raw_writew(v, bcsr_virt + BCSR_REG_MASKCLR); 112 __raw_writew(v, bcsr_virt + BCSR_REG_MASKCLR);
113 __raw_writew(v, bcsr_virt + BCSR_REG_INTSTAT); /* ack */ 113 __raw_writew(v, bcsr_virt + BCSR_REG_INTSTAT); /* ack */
114 wmb(); 114 wmb();
115} 115}
116 116
117static void bcsr_irq_unmask(unsigned int irq_nr) 117static void bcsr_irq_unmask(struct irq_data *d)
118{ 118{
119 unsigned short v = 1 << (irq_nr - bcsr_csc_base); 119 unsigned short v = 1 << (d->irq - bcsr_csc_base);
120 __raw_writew(v, bcsr_virt + BCSR_REG_INTSET); 120 __raw_writew(v, bcsr_virt + BCSR_REG_INTSET);
121 __raw_writew(v, bcsr_virt + BCSR_REG_MASKSET); 121 __raw_writew(v, bcsr_virt + BCSR_REG_MASKSET);
122 wmb(); 122 wmb();
@@ -124,9 +124,9 @@ static void bcsr_irq_unmask(unsigned int irq_nr)
124 124
125static struct irq_chip bcsr_irq_type = { 125static struct irq_chip bcsr_irq_type = {
126 .name = "CPLD", 126 .name = "CPLD",
127 .mask = bcsr_irq_mask, 127 .irq_mask = bcsr_irq_mask,
128 .mask_ack = bcsr_irq_maskack, 128 .irq_mask_ack = bcsr_irq_maskack,
129 .unmask = bcsr_irq_unmask, 129 .irq_unmask = bcsr_irq_unmask,
130}; 130};
131 131
132void __init bcsr_init_irq(int csc_start, int csc_end, int hook_irq) 132void __init bcsr_init_irq(int csc_start, int csc_end, int hook_irq)
@@ -142,8 +142,8 @@ void __init bcsr_init_irq(int csc_start, int csc_end, int hook_irq)
142 bcsr_csc_base = csc_start; 142 bcsr_csc_base = csc_start;
143 143
144 for (irq = csc_start; irq <= csc_end; irq++) 144 for (irq = csc_start; irq <= csc_end; irq++)
145 set_irq_chip_and_handler_name(irq, &bcsr_irq_type, 145 irq_set_chip_and_handler_name(irq, &bcsr_irq_type,
146 handle_level_irq, "level"); 146 handle_level_irq, "level");
147 147
148 set_irq_chained_handler(hook_irq, bcsr_csc_handler); 148 irq_set_chained_handler(hook_irq, bcsr_csc_handler);
149} 149}
diff --git a/arch/mips/alchemy/devboards/db1200/setup.c b/arch/mips/alchemy/devboards/db1200/setup.c
index 88761954755..4a8980027ec 100644
--- a/arch/mips/alchemy/devboards/db1200/setup.c
+++ b/arch/mips/alchemy/devboards/db1200/setup.c
@@ -63,20 +63,19 @@ void __init board_setup(void)
63static int __init db1200_arch_init(void) 63static int __init db1200_arch_init(void)
64{ 64{
65 /* GPIO7 is low-level triggered CPLD cascade */ 65 /* GPIO7 is low-level triggered CPLD cascade */
66 set_irq_type(AU1200_GPIO7_INT, IRQF_TRIGGER_LOW); 66 irq_set_irq_type(AU1200_GPIO7_INT, IRQF_TRIGGER_LOW);
67 bcsr_init_irq(DB1200_INT_BEGIN, DB1200_INT_END, AU1200_GPIO7_INT); 67 bcsr_init_irq(DB1200_INT_BEGIN, DB1200_INT_END, AU1200_GPIO7_INT);
68 68
69 /* insert/eject pairs: one of both is always screaming. To avoid 69 /* insert/eject pairs: one of both is always screaming. To avoid
70 * issues they must not be automatically enabled when initially 70 * issues they must not be automatically enabled when initially
71 * requested. 71 * requested.
72 */ 72 */
73 irq_to_desc(DB1200_SD0_INSERT_INT)->status |= IRQ_NOAUTOEN; 73 irq_set_status_flags(DB1200_SD0_INSERT_INT, IRQ_NOAUTOEN);
74 irq_to_desc(DB1200_SD0_EJECT_INT)->status |= IRQ_NOAUTOEN; 74 irq_set_status_flags(DB1200_SD0_EJECT_INT, IRQ_NOAUTOEN);
75 irq_to_desc(DB1200_PC0_INSERT_INT)->status |= IRQ_NOAUTOEN; 75 irq_set_status_flags(DB1200_PC0_INSERT_INT, IRQ_NOAUTOEN);
76 irq_to_desc(DB1200_PC0_EJECT_INT)->status |= IRQ_NOAUTOEN; 76 irq_set_status_flags(DB1200_PC0_EJECT_INT, IRQ_NOAUTOEN);
77 irq_to_desc(DB1200_PC1_INSERT_INT)->status |= IRQ_NOAUTOEN; 77 irq_set_status_flags(DB1200_PC1_INSERT_INT, IRQ_NOAUTOEN);
78 irq_to_desc(DB1200_PC1_EJECT_INT)->status |= IRQ_NOAUTOEN; 78 irq_set_status_flags(DB1200_PC1_EJECT_INT, IRQ_NOAUTOEN);
79
80 return 0; 79 return 0;
81} 80}
82arch_initcall(db1200_arch_init); 81arch_initcall(db1200_arch_init);
diff --git a/arch/mips/alchemy/devboards/db1x00/board_setup.c b/arch/mips/alchemy/devboards/db1x00/board_setup.c
index 9e45971343e..05f120ff90f 100644
--- a/arch/mips/alchemy/devboards/db1x00/board_setup.c
+++ b/arch/mips/alchemy/devboards/db1x00/board_setup.c
@@ -215,35 +215,35 @@ void __init board_setup(void)
215static int __init db1x00_init_irq(void) 215static int __init db1x00_init_irq(void)
216{ 216{
217#if defined(CONFIG_MIPS_MIRAGE) 217#if defined(CONFIG_MIPS_MIRAGE)
218 set_irq_type(AU1500_GPIO7_INT, IRQF_TRIGGER_RISING); /* TS pendown */ 218 irq_set_irq_type(AU1500_GPIO7_INT, IRQF_TRIGGER_RISING); /* TS pendown */
219#elif defined(CONFIG_MIPS_DB1550) 219#elif defined(CONFIG_MIPS_DB1550)
220 set_irq_type(AU1550_GPIO0_INT, IRQF_TRIGGER_LOW); /* CD0# */ 220 irq_set_irq_type(AU1550_GPIO0_INT, IRQF_TRIGGER_LOW); /* CD0# */
221 set_irq_type(AU1550_GPIO1_INT, IRQF_TRIGGER_LOW); /* CD1# */ 221 irq_set_irq_type(AU1550_GPIO1_INT, IRQF_TRIGGER_LOW); /* CD1# */
222 set_irq_type(AU1550_GPIO3_INT, IRQF_TRIGGER_LOW); /* CARD0# */ 222 irq_set_irq_type(AU1550_GPIO3_INT, IRQF_TRIGGER_LOW); /* CARD0# */
223 set_irq_type(AU1550_GPIO5_INT, IRQF_TRIGGER_LOW); /* CARD1# */ 223 irq_set_irq_type(AU1550_GPIO5_INT, IRQF_TRIGGER_LOW); /* CARD1# */
224 set_irq_type(AU1550_GPIO21_INT, IRQF_TRIGGER_LOW); /* STSCHG0# */ 224 irq_set_irq_type(AU1550_GPIO21_INT, IRQF_TRIGGER_LOW); /* STSCHG0# */
225 set_irq_type(AU1550_GPIO22_INT, IRQF_TRIGGER_LOW); /* STSCHG1# */ 225 irq_set_irq_type(AU1550_GPIO22_INT, IRQF_TRIGGER_LOW); /* STSCHG1# */
226#elif defined(CONFIG_MIPS_DB1500) 226#elif defined(CONFIG_MIPS_DB1500)
227 set_irq_type(AU1500_GPIO0_INT, IRQF_TRIGGER_LOW); /* CD0# */ 227 irq_set_irq_type(AU1500_GPIO0_INT, IRQF_TRIGGER_LOW); /* CD0# */
228 set_irq_type(AU1500_GPIO3_INT, IRQF_TRIGGER_LOW); /* CD1# */ 228 irq_set_irq_type(AU1500_GPIO3_INT, IRQF_TRIGGER_LOW); /* CD1# */
229 set_irq_type(AU1500_GPIO2_INT, IRQF_TRIGGER_LOW); /* CARD0# */ 229 irq_set_irq_type(AU1500_GPIO2_INT, IRQF_TRIGGER_LOW); /* CARD0# */
230 set_irq_type(AU1500_GPIO5_INT, IRQF_TRIGGER_LOW); /* CARD1# */ 230 irq_set_irq_type(AU1500_GPIO5_INT, IRQF_TRIGGER_LOW); /* CARD1# */
231 set_irq_type(AU1500_GPIO1_INT, IRQF_TRIGGER_LOW); /* STSCHG0# */ 231 irq_set_irq_type(AU1500_GPIO1_INT, IRQF_TRIGGER_LOW); /* STSCHG0# */
232 set_irq_type(AU1500_GPIO4_INT, IRQF_TRIGGER_LOW); /* STSCHG1# */ 232 irq_set_irq_type(AU1500_GPIO4_INT, IRQF_TRIGGER_LOW); /* STSCHG1# */
233#elif defined(CONFIG_MIPS_DB1100) 233#elif defined(CONFIG_MIPS_DB1100)
234 set_irq_type(AU1100_GPIO0_INT, IRQF_TRIGGER_LOW); /* CD0# */ 234 irq_set_irq_type(AU1100_GPIO0_INT, IRQF_TRIGGER_LOW); /* CD0# */
235 set_irq_type(AU1100_GPIO3_INT, IRQF_TRIGGER_LOW); /* CD1# */ 235 irq_set_irq_type(AU1100_GPIO3_INT, IRQF_TRIGGER_LOW); /* CD1# */
236 set_irq_type(AU1100_GPIO2_INT, IRQF_TRIGGER_LOW); /* CARD0# */ 236 irq_set_irq_type(AU1100_GPIO2_INT, IRQF_TRIGGER_LOW); /* CARD0# */
237 set_irq_type(AU1100_GPIO5_INT, IRQF_TRIGGER_LOW); /* CARD1# */ 237 irq_set_irq_type(AU1100_GPIO5_INT, IRQF_TRIGGER_LOW); /* CARD1# */
238 set_irq_type(AU1100_GPIO1_INT, IRQF_TRIGGER_LOW); /* STSCHG0# */ 238 irq_set_irq_type(AU1100_GPIO1_INT, IRQF_TRIGGER_LOW); /* STSCHG0# */
239 set_irq_type(AU1100_GPIO4_INT, IRQF_TRIGGER_LOW); /* STSCHG1# */ 239 irq_set_irq_type(AU1100_GPIO4_INT, IRQF_TRIGGER_LOW); /* STSCHG1# */
240#elif defined(CONFIG_MIPS_DB1000) 240#elif defined(CONFIG_MIPS_DB1000)
241 set_irq_type(AU1000_GPIO0_INT, IRQF_TRIGGER_LOW); /* CD0# */ 241 irq_set_irq_type(AU1000_GPIO0_INT, IRQF_TRIGGER_LOW); /* CD0# */
242 set_irq_type(AU1000_GPIO3_INT, IRQF_TRIGGER_LOW); /* CD1# */ 242 irq_set_irq_type(AU1000_GPIO3_INT, IRQF_TRIGGER_LOW); /* CD1# */
243 set_irq_type(AU1000_GPIO2_INT, IRQF_TRIGGER_LOW); /* CARD0# */ 243 irq_set_irq_type(AU1000_GPIO2_INT, IRQF_TRIGGER_LOW); /* CARD0# */
244 set_irq_type(AU1000_GPIO5_INT, IRQF_TRIGGER_LOW); /* CARD1# */ 244 irq_set_irq_type(AU1000_GPIO5_INT, IRQF_TRIGGER_LOW); /* CARD1# */
245 set_irq_type(AU1000_GPIO1_INT, IRQF_TRIGGER_LOW); /* STSCHG0# */ 245 irq_set_irq_type(AU1000_GPIO1_INT, IRQF_TRIGGER_LOW); /* STSCHG0# */
246 set_irq_type(AU1000_GPIO4_INT, IRQF_TRIGGER_LOW); /* STSCHG1# */ 246 irq_set_irq_type(AU1000_GPIO4_INT, IRQF_TRIGGER_LOW); /* STSCHG1# */
247#endif 247#endif
248 return 0; 248 return 0;
249} 249}
diff --git a/arch/mips/alchemy/devboards/pb1000/board_setup.c b/arch/mips/alchemy/devboards/pb1000/board_setup.c
index f6540ec47a6..2d85c4b5be0 100644
--- a/arch/mips/alchemy/devboards/pb1000/board_setup.c
+++ b/arch/mips/alchemy/devboards/pb1000/board_setup.c
@@ -197,7 +197,7 @@ void __init board_setup(void)
197 197
198static int __init pb1000_init_irq(void) 198static int __init pb1000_init_irq(void)
199{ 199{
200 set_irq_type(AU1000_GPIO15_INT, IRQF_TRIGGER_LOW); 200 irq_set_irq_type(AU1000_GPIO15_INT, IRQF_TRIGGER_LOW);
201 return 0; 201 return 0;
202} 202}
203arch_initcall(pb1000_init_irq); 203arch_initcall(pb1000_init_irq);
diff --git a/arch/mips/alchemy/devboards/pb1100/board_setup.c b/arch/mips/alchemy/devboards/pb1100/board_setup.c
index 90dda5f3ecc..d108fd573aa 100644
--- a/arch/mips/alchemy/devboards/pb1100/board_setup.c
+++ b/arch/mips/alchemy/devboards/pb1100/board_setup.c
@@ -117,10 +117,10 @@ void __init board_setup(void)
117 117
118static int __init pb1100_init_irq(void) 118static int __init pb1100_init_irq(void)
119{ 119{
120 set_irq_type(AU1100_GPIO9_INT, IRQF_TRIGGER_LOW); /* PCCD# */ 120 irq_set_irq_type(AU1100_GPIO9_INT, IRQF_TRIGGER_LOW); /* PCCD# */
121 set_irq_type(AU1100_GPIO10_INT, IRQF_TRIGGER_LOW); /* PCSTSCHG# */ 121 irq_set_irq_type(AU1100_GPIO10_INT, IRQF_TRIGGER_LOW); /* PCSTSCHG# */
122 set_irq_type(AU1100_GPIO11_INT, IRQF_TRIGGER_LOW); /* PCCard# */ 122 irq_set_irq_type(AU1100_GPIO11_INT, IRQF_TRIGGER_LOW); /* PCCard# */
123 set_irq_type(AU1100_GPIO13_INT, IRQF_TRIGGER_LOW); /* DC_IRQ# */ 123 irq_set_irq_type(AU1100_GPIO13_INT, IRQF_TRIGGER_LOW); /* DC_IRQ# */
124 124
125 return 0; 125 return 0;
126} 126}
diff --git a/arch/mips/alchemy/devboards/pb1200/board_setup.c b/arch/mips/alchemy/devboards/pb1200/board_setup.c
index 8b4466f2d44..6d06b07c238 100644
--- a/arch/mips/alchemy/devboards/pb1200/board_setup.c
+++ b/arch/mips/alchemy/devboards/pb1200/board_setup.c
@@ -142,7 +142,7 @@ static int __init pb1200_init_irq(void)
142 panic("Game over. Your score is 0."); 142 panic("Game over. Your score is 0.");
143 } 143 }
144 144
145 set_irq_type(AU1200_GPIO7_INT, IRQF_TRIGGER_LOW); 145 irq_set_irq_type(AU1200_GPIO7_INT, IRQF_TRIGGER_LOW);
146 bcsr_init_irq(PB1200_INT_BEGIN, PB1200_INT_END, AU1200_GPIO7_INT); 146 bcsr_init_irq(PB1200_INT_BEGIN, PB1200_INT_END, AU1200_GPIO7_INT);
147 147
148 return 0; 148 return 0;
diff --git a/arch/mips/alchemy/devboards/pb1500/board_setup.c b/arch/mips/alchemy/devboards/pb1500/board_setup.c
index 9cd9dfa698e..83f46215eb0 100644
--- a/arch/mips/alchemy/devboards/pb1500/board_setup.c
+++ b/arch/mips/alchemy/devboards/pb1500/board_setup.c
@@ -134,14 +134,14 @@ void __init board_setup(void)
134 134
135static int __init pb1500_init_irq(void) 135static int __init pb1500_init_irq(void)
136{ 136{
137 set_irq_type(AU1500_GPIO9_INT, IRQF_TRIGGER_LOW); /* CD0# */ 137 irq_set_irq_type(AU1500_GPIO9_INT, IRQF_TRIGGER_LOW); /* CD0# */
138 set_irq_type(AU1500_GPIO10_INT, IRQF_TRIGGER_LOW); /* CARD0 */ 138 irq_set_irq_type(AU1500_GPIO10_INT, IRQF_TRIGGER_LOW); /* CARD0 */
139 set_irq_type(AU1500_GPIO11_INT, IRQF_TRIGGER_LOW); /* STSCHG0# */ 139 irq_set_irq_type(AU1500_GPIO11_INT, IRQF_TRIGGER_LOW); /* STSCHG0# */
140 set_irq_type(AU1500_GPIO204_INT, IRQF_TRIGGER_HIGH); 140 irq_set_irq_type(AU1500_GPIO204_INT, IRQF_TRIGGER_HIGH);
141 set_irq_type(AU1500_GPIO201_INT, IRQF_TRIGGER_LOW); 141 irq_set_irq_type(AU1500_GPIO201_INT, IRQF_TRIGGER_LOW);
142 set_irq_type(AU1500_GPIO202_INT, IRQF_TRIGGER_LOW); 142 irq_set_irq_type(AU1500_GPIO202_INT, IRQF_TRIGGER_LOW);
143 set_irq_type(AU1500_GPIO203_INT, IRQF_TRIGGER_LOW); 143 irq_set_irq_type(AU1500_GPIO203_INT, IRQF_TRIGGER_LOW);
144 set_irq_type(AU1500_GPIO205_INT, IRQF_TRIGGER_LOW); 144 irq_set_irq_type(AU1500_GPIO205_INT, IRQF_TRIGGER_LOW);
145 145
146 return 0; 146 return 0;
147} 147}
diff --git a/arch/mips/alchemy/devboards/pb1550/board_setup.c b/arch/mips/alchemy/devboards/pb1550/board_setup.c
index 9d7d6edafa8..b790213848b 100644
--- a/arch/mips/alchemy/devboards/pb1550/board_setup.c
+++ b/arch/mips/alchemy/devboards/pb1550/board_setup.c
@@ -73,9 +73,9 @@ void __init board_setup(void)
73 73
74static int __init pb1550_init_irq(void) 74static int __init pb1550_init_irq(void)
75{ 75{
76 set_irq_type(AU1550_GPIO0_INT, IRQF_TRIGGER_LOW); 76 irq_set_irq_type(AU1550_GPIO0_INT, IRQF_TRIGGER_LOW);
77 set_irq_type(AU1550_GPIO1_INT, IRQF_TRIGGER_LOW); 77 irq_set_irq_type(AU1550_GPIO1_INT, IRQF_TRIGGER_LOW);
78 set_irq_type(AU1550_GPIO201_205_INT, IRQF_TRIGGER_HIGH); 78 irq_set_irq_type(AU1550_GPIO201_205_INT, IRQF_TRIGGER_HIGH);
79 79
80 /* enable both PCMCIA card irqs in the shared line */ 80 /* enable both PCMCIA card irqs in the shared line */
81 alchemy_gpio2_enable_int(201); 81 alchemy_gpio2_enable_int(201);
diff --git a/arch/mips/alchemy/mtx-1/board_setup.c b/arch/mips/alchemy/mtx-1/board_setup.c
index 6398fa95905..cf436ab679a 100644
--- a/arch/mips/alchemy/mtx-1/board_setup.c
+++ b/arch/mips/alchemy/mtx-1/board_setup.c
@@ -54,8 +54,8 @@ int mtx1_pci_idsel(unsigned int devsel, int assert);
54 54
55static void mtx1_reset(char *c) 55static void mtx1_reset(char *c)
56{ 56{
57 /* Hit BCSR.SYSTEM_CONTROL[SW_RST] */ 57 /* Jump to the reset vector */
58 au_writel(0x00000000, 0xAE00001C); 58 __asm__ __volatile__("jr\t%0"::"r"(0xbfc00000));
59} 59}
60 60
61static void mtx1_power_off(void) 61static void mtx1_power_off(void)
@@ -123,11 +123,11 @@ mtx1_pci_idsel(unsigned int devsel, int assert)
123 123
124static int __init mtx1_init_irq(void) 124static int __init mtx1_init_irq(void)
125{ 125{
126 set_irq_type(AU1500_GPIO204_INT, IRQF_TRIGGER_HIGH); 126 irq_set_irq_type(AU1500_GPIO204_INT, IRQF_TRIGGER_HIGH);
127 set_irq_type(AU1500_GPIO201_INT, IRQF_TRIGGER_LOW); 127 irq_set_irq_type(AU1500_GPIO201_INT, IRQF_TRIGGER_LOW);
128 set_irq_type(AU1500_GPIO202_INT, IRQF_TRIGGER_LOW); 128 irq_set_irq_type(AU1500_GPIO202_INT, IRQF_TRIGGER_LOW);
129 set_irq_type(AU1500_GPIO203_INT, IRQF_TRIGGER_LOW); 129 irq_set_irq_type(AU1500_GPIO203_INT, IRQF_TRIGGER_LOW);
130 set_irq_type(AU1500_GPIO205_INT, IRQF_TRIGGER_LOW); 130 irq_set_irq_type(AU1500_GPIO205_INT, IRQF_TRIGGER_LOW);
131 131
132 return 0; 132 return 0;
133} 133}
diff --git a/arch/mips/alchemy/mtx-1/platform.c b/arch/mips/alchemy/mtx-1/platform.c
index e30e42add69..956f946218c 100644
--- a/arch/mips/alchemy/mtx-1/platform.c
+++ b/arch/mips/alchemy/mtx-1/platform.c
@@ -28,6 +28,8 @@
28#include <linux/mtd/physmap.h> 28#include <linux/mtd/physmap.h>
29#include <mtd/mtd-abi.h> 29#include <mtd/mtd-abi.h>
30 30
31#include <asm/mach-au1x00/au1xxx_eth.h>
32
31static struct gpio_keys_button mtx1_gpio_button[] = { 33static struct gpio_keys_button mtx1_gpio_button[] = {
32 { 34 {
33 .gpio = 207, 35 .gpio = 207,
@@ -140,10 +142,17 @@ static struct __initdata platform_device * mtx1_devs[] = {
140 &mtx1_mtd, 142 &mtx1_mtd,
141}; 143};
142 144
145static struct au1000_eth_platform_data mtx1_au1000_eth0_pdata = {
146 .phy_search_highest_addr = 1,
147 .phy1_search_mac0 = 1,
148};
149
143static int __init mtx1_register_devices(void) 150static int __init mtx1_register_devices(void)
144{ 151{
145 int rc; 152 int rc;
146 153
154 au1xxx_override_eth_cfg(0, &mtx1_au1000_eth0_pdata);
155
147 rc = gpio_request(mtx1_gpio_button[0].gpio, 156 rc = gpio_request(mtx1_gpio_button[0].gpio,
148 mtx1_gpio_button[0].desc); 157 mtx1_gpio_button[0].desc);
149 if (rc < 0) { 158 if (rc < 0) {
diff --git a/arch/mips/alchemy/xxs1500/board_setup.c b/arch/mips/alchemy/xxs1500/board_setup.c
index b43c918925d..febfb0fb089 100644
--- a/arch/mips/alchemy/xxs1500/board_setup.c
+++ b/arch/mips/alchemy/xxs1500/board_setup.c
@@ -36,8 +36,8 @@
36 36
37static void xxs1500_reset(char *c) 37static void xxs1500_reset(char *c)
38{ 38{
39 /* Hit BCSR.SYSTEM_CONTROL[SW_RST] */ 39 /* Jump to the reset vector */
40 au_writel(0x00000000, 0xAE00001C); 40 __asm__ __volatile__("jr\t%0"::"r"(0xbfc00000));
41} 41}
42 42
43static void xxs1500_power_off(void) 43static void xxs1500_power_off(void)
@@ -85,19 +85,19 @@ void __init board_setup(void)
85 85
86static int __init xxs1500_init_irq(void) 86static int __init xxs1500_init_irq(void)
87{ 87{
88 set_irq_type(AU1500_GPIO204_INT, IRQF_TRIGGER_HIGH); 88 irq_set_irq_type(AU1500_GPIO204_INT, IRQF_TRIGGER_HIGH);
89 set_irq_type(AU1500_GPIO201_INT, IRQF_TRIGGER_LOW); 89 irq_set_irq_type(AU1500_GPIO201_INT, IRQF_TRIGGER_LOW);
90 set_irq_type(AU1500_GPIO202_INT, IRQF_TRIGGER_LOW); 90 irq_set_irq_type(AU1500_GPIO202_INT, IRQF_TRIGGER_LOW);
91 set_irq_type(AU1500_GPIO203_INT, IRQF_TRIGGER_LOW); 91 irq_set_irq_type(AU1500_GPIO203_INT, IRQF_TRIGGER_LOW);
92 set_irq_type(AU1500_GPIO205_INT, IRQF_TRIGGER_LOW); 92 irq_set_irq_type(AU1500_GPIO205_INT, IRQF_TRIGGER_LOW);
93 set_irq_type(AU1500_GPIO207_INT, IRQF_TRIGGER_LOW); 93 irq_set_irq_type(AU1500_GPIO207_INT, IRQF_TRIGGER_LOW);
94 94
95 set_irq_type(AU1500_GPIO0_INT, IRQF_TRIGGER_LOW); 95 irq_set_irq_type(AU1500_GPIO0_INT, IRQF_TRIGGER_LOW);
96 set_irq_type(AU1500_GPIO1_INT, IRQF_TRIGGER_LOW); 96 irq_set_irq_type(AU1500_GPIO1_INT, IRQF_TRIGGER_LOW);
97 set_irq_type(AU1500_GPIO2_INT, IRQF_TRIGGER_LOW); 97 irq_set_irq_type(AU1500_GPIO2_INT, IRQF_TRIGGER_LOW);
98 set_irq_type(AU1500_GPIO3_INT, IRQF_TRIGGER_LOW); 98 irq_set_irq_type(AU1500_GPIO3_INT, IRQF_TRIGGER_LOW);
99 set_irq_type(AU1500_GPIO4_INT, IRQF_TRIGGER_LOW); /* CF irq */ 99 irq_set_irq_type(AU1500_GPIO4_INT, IRQF_TRIGGER_LOW); /* CF irq */
100 set_irq_type(AU1500_GPIO5_INT, IRQF_TRIGGER_LOW); 100 irq_set_irq_type(AU1500_GPIO5_INT, IRQF_TRIGGER_LOW);
101 101
102 return 0; 102 return 0;
103} 103}
diff --git a/arch/mips/ar7/irq.c b/arch/mips/ar7/irq.c
index 4ec2642c568..03db3daadbd 100644
--- a/arch/mips/ar7/irq.c
+++ b/arch/mips/ar7/irq.c
@@ -49,51 +49,51 @@
49 49
50static int ar7_irq_base; 50static int ar7_irq_base;
51 51
52static void ar7_unmask_irq(unsigned int irq) 52static void ar7_unmask_irq(struct irq_data *d)
53{ 53{
54 writel(1 << ((irq - ar7_irq_base) % 32), 54 writel(1 << ((d->irq - ar7_irq_base) % 32),
55 REG(ESR_OFFSET(irq - ar7_irq_base))); 55 REG(ESR_OFFSET(d->irq - ar7_irq_base)));
56} 56}
57 57
58static void ar7_mask_irq(unsigned int irq) 58static void ar7_mask_irq(struct irq_data *d)
59{ 59{
60 writel(1 << ((irq - ar7_irq_base) % 32), 60 writel(1 << ((d->irq - ar7_irq_base) % 32),
61 REG(ECR_OFFSET(irq - ar7_irq_base))); 61 REG(ECR_OFFSET(d->irq - ar7_irq_base)));
62} 62}
63 63
64static void ar7_ack_irq(unsigned int irq) 64static void ar7_ack_irq(struct irq_data *d)
65{ 65{
66 writel(1 << ((irq - ar7_irq_base) % 32), 66 writel(1 << ((d->irq - ar7_irq_base) % 32),
67 REG(CR_OFFSET(irq - ar7_irq_base))); 67 REG(CR_OFFSET(d->irq - ar7_irq_base)));
68} 68}
69 69
70static void ar7_unmask_sec_irq(unsigned int irq) 70static void ar7_unmask_sec_irq(struct irq_data *d)
71{ 71{
72 writel(1 << (irq - ar7_irq_base - 40), REG(SEC_ESR_OFFSET)); 72 writel(1 << (d->irq - ar7_irq_base - 40), REG(SEC_ESR_OFFSET));
73} 73}
74 74
75static void ar7_mask_sec_irq(unsigned int irq) 75static void ar7_mask_sec_irq(struct irq_data *d)
76{ 76{
77 writel(1 << (irq - ar7_irq_base - 40), REG(SEC_ECR_OFFSET)); 77 writel(1 << (d->irq - ar7_irq_base - 40), REG(SEC_ECR_OFFSET));
78} 78}
79 79
80static void ar7_ack_sec_irq(unsigned int irq) 80static void ar7_ack_sec_irq(struct irq_data *d)
81{ 81{
82 writel(1 << (irq - ar7_irq_base - 40), REG(SEC_CR_OFFSET)); 82 writel(1 << (d->irq - ar7_irq_base - 40), REG(SEC_CR_OFFSET));
83} 83}
84 84
85static struct irq_chip ar7_irq_type = { 85static struct irq_chip ar7_irq_type = {
86 .name = "AR7", 86 .name = "AR7",
87 .unmask = ar7_unmask_irq, 87 .irq_unmask = ar7_unmask_irq,
88 .mask = ar7_mask_irq, 88 .irq_mask = ar7_mask_irq,
89 .ack = ar7_ack_irq 89 .irq_ack = ar7_ack_irq
90}; 90};
91 91
92static struct irq_chip ar7_sec_irq_type = { 92static struct irq_chip ar7_sec_irq_type = {
93 .name = "AR7", 93 .name = "AR7",
94 .unmask = ar7_unmask_sec_irq, 94 .irq_unmask = ar7_unmask_sec_irq,
95 .mask = ar7_mask_sec_irq, 95 .irq_mask = ar7_mask_sec_irq,
96 .ack = ar7_ack_sec_irq, 96 .irq_ack = ar7_ack_sec_irq,
97}; 97};
98 98
99static struct irqaction ar7_cascade_action = { 99static struct irqaction ar7_cascade_action = {
@@ -119,11 +119,11 @@ static void __init ar7_irq_init(int base)
119 for (i = 0; i < 40; i++) { 119 for (i = 0; i < 40; i++) {
120 writel(i, REG(CHNL_OFFSET(i))); 120 writel(i, REG(CHNL_OFFSET(i)));
121 /* Primary IRQ's */ 121 /* Primary IRQ's */
122 set_irq_chip_and_handler(base + i, &ar7_irq_type, 122 irq_set_chip_and_handler(base + i, &ar7_irq_type,
123 handle_level_irq); 123 handle_level_irq);
124 /* Secondary IRQ's */ 124 /* Secondary IRQ's */
125 if (i < 32) 125 if (i < 32)
126 set_irq_chip_and_handler(base + i + 40, 126 irq_set_chip_and_handler(base + i + 40,
127 &ar7_sec_irq_type, 127 &ar7_sec_irq_type,
128 handle_level_irq); 128 handle_level_irq);
129 } 129 }
diff --git a/arch/mips/ath79/irq.c b/arch/mips/ath79/irq.c
index 1bf7f719ba5..ac610d5fe3b 100644
--- a/arch/mips/ath79/irq.c
+++ b/arch/mips/ath79/irq.c
@@ -62,13 +62,12 @@ static void ath79_misc_irq_handler(unsigned int irq, struct irq_desc *desc)
62 spurious_interrupt(); 62 spurious_interrupt();
63} 63}
64 64
65static void ar71xx_misc_irq_unmask(unsigned int irq) 65static void ar71xx_misc_irq_unmask(struct irq_data *d)
66{ 66{
67 unsigned int irq = d->irq - ATH79_MISC_IRQ_BASE;
67 void __iomem *base = ath79_reset_base; 68 void __iomem *base = ath79_reset_base;
68 u32 t; 69 u32 t;
69 70
70 irq -= ATH79_MISC_IRQ_BASE;
71
72 t = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE); 71 t = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
73 __raw_writel(t | (1 << irq), base + AR71XX_RESET_REG_MISC_INT_ENABLE); 72 __raw_writel(t | (1 << irq), base + AR71XX_RESET_REG_MISC_INT_ENABLE);
74 73
@@ -76,13 +75,12 @@ static void ar71xx_misc_irq_unmask(unsigned int irq)
76 __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE); 75 __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
77} 76}
78 77
79static void ar71xx_misc_irq_mask(unsigned int irq) 78static void ar71xx_misc_irq_mask(struct irq_data *d)
80{ 79{
80 unsigned int irq = d->irq - ATH79_MISC_IRQ_BASE;
81 void __iomem *base = ath79_reset_base; 81 void __iomem *base = ath79_reset_base;
82 u32 t; 82 u32 t;
83 83
84 irq -= ATH79_MISC_IRQ_BASE;
85
86 t = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE); 84 t = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
87 __raw_writel(t & ~(1 << irq), base + AR71XX_RESET_REG_MISC_INT_ENABLE); 85 __raw_writel(t & ~(1 << irq), base + AR71XX_RESET_REG_MISC_INT_ENABLE);
88 86
@@ -90,13 +88,12 @@ static void ar71xx_misc_irq_mask(unsigned int irq)
90 __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE); 88 __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
91} 89}
92 90
93static void ar724x_misc_irq_ack(unsigned int irq) 91static void ar724x_misc_irq_ack(struct irq_data *d)
94{ 92{
93 unsigned int irq = d->irq - ATH79_MISC_IRQ_BASE;
95 void __iomem *base = ath79_reset_base; 94 void __iomem *base = ath79_reset_base;
96 u32 t; 95 u32 t;
97 96
98 irq -= ATH79_MISC_IRQ_BASE;
99
100 t = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_STATUS); 97 t = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_STATUS);
101 __raw_writel(t & ~(1 << irq), base + AR71XX_RESET_REG_MISC_INT_STATUS); 98 __raw_writel(t & ~(1 << irq), base + AR71XX_RESET_REG_MISC_INT_STATUS);
102 99
@@ -106,8 +103,8 @@ static void ar724x_misc_irq_ack(unsigned int irq)
106 103
107static struct irq_chip ath79_misc_irq_chip = { 104static struct irq_chip ath79_misc_irq_chip = {
108 .name = "MISC", 105 .name = "MISC",
109 .unmask = ar71xx_misc_irq_unmask, 106 .irq_unmask = ar71xx_misc_irq_unmask,
110 .mask = ar71xx_misc_irq_mask, 107 .irq_mask = ar71xx_misc_irq_mask,
111}; 108};
112 109
113static void __init ath79_misc_irq_init(void) 110static void __init ath79_misc_irq_init(void)
@@ -119,20 +116,19 @@ static void __init ath79_misc_irq_init(void)
119 __raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_STATUS); 116 __raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_STATUS);
120 117
121 if (soc_is_ar71xx() || soc_is_ar913x()) 118 if (soc_is_ar71xx() || soc_is_ar913x())
122 ath79_misc_irq_chip.mask_ack = ar71xx_misc_irq_mask; 119 ath79_misc_irq_chip.irq_mask_ack = ar71xx_misc_irq_mask;
123 else if (soc_is_ar724x()) 120 else if (soc_is_ar724x())
124 ath79_misc_irq_chip.ack = ar724x_misc_irq_ack; 121 ath79_misc_irq_chip.irq_ack = ar724x_misc_irq_ack;
125 else 122 else
126 BUG(); 123 BUG();
127 124
128 for (i = ATH79_MISC_IRQ_BASE; 125 for (i = ATH79_MISC_IRQ_BASE;
129 i < ATH79_MISC_IRQ_BASE + ATH79_MISC_IRQ_COUNT; i++) { 126 i < ATH79_MISC_IRQ_BASE + ATH79_MISC_IRQ_COUNT; i++) {
130 irq_desc[i].status = IRQ_DISABLED; 127 irq_set_chip_and_handler(i, &ath79_misc_irq_chip,
131 set_irq_chip_and_handler(i, &ath79_misc_irq_chip,
132 handle_level_irq); 128 handle_level_irq);
133 } 129 }
134 130
135 set_irq_chained_handler(ATH79_CPU_IRQ_MISC, ath79_misc_irq_handler); 131 irq_set_chained_handler(ATH79_CPU_IRQ_MISC, ath79_misc_irq_handler);
136} 132}
137 133
138asmlinkage void plat_irq_dispatch(void) 134asmlinkage void plat_irq_dispatch(void)
diff --git a/arch/mips/bcm63xx/boards/Makefile b/arch/mips/bcm63xx/boards/Makefile
index e5cc86dc1da..9f64fb41407 100644
--- a/arch/mips/bcm63xx/boards/Makefile
+++ b/arch/mips/bcm63xx/boards/Makefile
@@ -1,3 +1,3 @@
1obj-$(CONFIG_BOARD_BCM963XX) += board_bcm963xx.o 1obj-$(CONFIG_BOARD_BCM963XX) += board_bcm963xx.o
2 2
3EXTRA_CFLAGS += -Werror 3ccflags-y := -Werror
diff --git a/arch/mips/bcm63xx/irq.c b/arch/mips/bcm63xx/irq.c
index 3be87f2422f..cea6021cb8d 100644
--- a/arch/mips/bcm63xx/irq.c
+++ b/arch/mips/bcm63xx/irq.c
@@ -76,88 +76,80 @@ asmlinkage void plat_irq_dispatch(void)
76 * internal IRQs operations: only mask/unmask on PERF irq mask 76 * internal IRQs operations: only mask/unmask on PERF irq mask
77 * register. 77 * register.
78 */ 78 */
79static inline void bcm63xx_internal_irq_mask(unsigned int irq) 79static inline void bcm63xx_internal_irq_mask(struct irq_data *d)
80{ 80{
81 unsigned int irq = d->irq - IRQ_INTERNAL_BASE;
81 u32 mask; 82 u32 mask;
82 83
83 irq -= IRQ_INTERNAL_BASE;
84 mask = bcm_perf_readl(PERF_IRQMASK_REG); 84 mask = bcm_perf_readl(PERF_IRQMASK_REG);
85 mask &= ~(1 << irq); 85 mask &= ~(1 << irq);
86 bcm_perf_writel(mask, PERF_IRQMASK_REG); 86 bcm_perf_writel(mask, PERF_IRQMASK_REG);
87} 87}
88 88
89static void bcm63xx_internal_irq_unmask(unsigned int irq) 89static void bcm63xx_internal_irq_unmask(struct irq_data *d)
90{ 90{
91 unsigned int irq = d->irq - IRQ_INTERNAL_BASE;
91 u32 mask; 92 u32 mask;
92 93
93 irq -= IRQ_INTERNAL_BASE;
94 mask = bcm_perf_readl(PERF_IRQMASK_REG); 94 mask = bcm_perf_readl(PERF_IRQMASK_REG);
95 mask |= (1 << irq); 95 mask |= (1 << irq);
96 bcm_perf_writel(mask, PERF_IRQMASK_REG); 96 bcm_perf_writel(mask, PERF_IRQMASK_REG);
97} 97}
98 98
99static unsigned int bcm63xx_internal_irq_startup(unsigned int irq)
100{
101 bcm63xx_internal_irq_unmask(irq);
102 return 0;
103}
104
105/* 99/*
106 * external IRQs operations: mask/unmask and clear on PERF external 100 * external IRQs operations: mask/unmask and clear on PERF external
107 * irq control register. 101 * irq control register.
108 */ 102 */
109static void bcm63xx_external_irq_mask(unsigned int irq) 103static void bcm63xx_external_irq_mask(struct irq_data *d)
110{ 104{
105 unsigned int irq = d->irq - IRQ_EXT_BASE;
111 u32 reg; 106 u32 reg;
112 107
113 irq -= IRQ_EXT_BASE;
114 reg = bcm_perf_readl(PERF_EXTIRQ_CFG_REG); 108 reg = bcm_perf_readl(PERF_EXTIRQ_CFG_REG);
115 reg &= ~EXTIRQ_CFG_MASK(irq); 109 reg &= ~EXTIRQ_CFG_MASK(irq);
116 bcm_perf_writel(reg, PERF_EXTIRQ_CFG_REG); 110 bcm_perf_writel(reg, PERF_EXTIRQ_CFG_REG);
117} 111}
118 112
119static void bcm63xx_external_irq_unmask(unsigned int irq) 113static void bcm63xx_external_irq_unmask(struct irq_data *d)
120{ 114{
115 unsigned int irq = d->irq - IRQ_EXT_BASE;
121 u32 reg; 116 u32 reg;
122 117
123 irq -= IRQ_EXT_BASE;
124 reg = bcm_perf_readl(PERF_EXTIRQ_CFG_REG); 118 reg = bcm_perf_readl(PERF_EXTIRQ_CFG_REG);
125 reg |= EXTIRQ_CFG_MASK(irq); 119 reg |= EXTIRQ_CFG_MASK(irq);
126 bcm_perf_writel(reg, PERF_EXTIRQ_CFG_REG); 120 bcm_perf_writel(reg, PERF_EXTIRQ_CFG_REG);
127} 121}
128 122
129static void bcm63xx_external_irq_clear(unsigned int irq) 123static void bcm63xx_external_irq_clear(struct irq_data *d)
130{ 124{
125 unsigned int irq = d->irq - IRQ_EXT_BASE;
131 u32 reg; 126 u32 reg;
132 127
133 irq -= IRQ_EXT_BASE;
134 reg = bcm_perf_readl(PERF_EXTIRQ_CFG_REG); 128 reg = bcm_perf_readl(PERF_EXTIRQ_CFG_REG);
135 reg |= EXTIRQ_CFG_CLEAR(irq); 129 reg |= EXTIRQ_CFG_CLEAR(irq);
136 bcm_perf_writel(reg, PERF_EXTIRQ_CFG_REG); 130 bcm_perf_writel(reg, PERF_EXTIRQ_CFG_REG);
137} 131}
138 132
139static unsigned int bcm63xx_external_irq_startup(unsigned int irq) 133static unsigned int bcm63xx_external_irq_startup(struct irq_data *d)
140{ 134{
141 set_c0_status(0x100 << (irq - IRQ_MIPS_BASE)); 135 set_c0_status(0x100 << (d->irq - IRQ_MIPS_BASE));
142 irq_enable_hazard(); 136 irq_enable_hazard();
143 bcm63xx_external_irq_unmask(irq); 137 bcm63xx_external_irq_unmask(d);
144 return 0; 138 return 0;
145} 139}
146 140
147static void bcm63xx_external_irq_shutdown(unsigned int irq) 141static void bcm63xx_external_irq_shutdown(struct irq_data *d)
148{ 142{
149 bcm63xx_external_irq_mask(irq); 143 bcm63xx_external_irq_mask(d);
150 clear_c0_status(0x100 << (irq - IRQ_MIPS_BASE)); 144 clear_c0_status(0x100 << (d->irq - IRQ_MIPS_BASE));
151 irq_disable_hazard(); 145 irq_disable_hazard();
152} 146}
153 147
154static int bcm63xx_external_irq_set_type(unsigned int irq, 148static int bcm63xx_external_irq_set_type(struct irq_data *d,
155 unsigned int flow_type) 149 unsigned int flow_type)
156{ 150{
151 unsigned int irq = d->irq - IRQ_EXT_BASE;
157 u32 reg; 152 u32 reg;
158 struct irq_desc *desc = irq_desc + irq;
159
160 irq -= IRQ_EXT_BASE;
161 153
162 flow_type &= IRQ_TYPE_SENSE_MASK; 154 flow_type &= IRQ_TYPE_SENSE_MASK;
163 155
@@ -199,37 +191,32 @@ static int bcm63xx_external_irq_set_type(unsigned int irq,
199 } 191 }
200 bcm_perf_writel(reg, PERF_EXTIRQ_CFG_REG); 192 bcm_perf_writel(reg, PERF_EXTIRQ_CFG_REG);
201 193
202 if (flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) { 194 irqd_set_trigger_type(d, flow_type);
203 desc->status |= IRQ_LEVEL; 195 if (flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
204 desc->handle_irq = handle_level_irq; 196 __irq_set_handler_locked(d->irq, handle_level_irq);
205 } else { 197 else
206 desc->handle_irq = handle_edge_irq; 198 __irq_set_handler_locked(d->irq, handle_edge_irq);
207 }
208 199
209 return 0; 200 return IRQ_SET_MASK_OK_NOCOPY;
210} 201}
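
The set_type conversion just above also shows the new genirq bookkeeping: the trigger is recorded with irqd_set_trigger_type(), the flow handler is swapped with __irq_set_handler_locked() (the descriptor lock is already held when .irq_set_type runs), and returning IRQ_SET_MASK_OK_NOCOPY tells the core not to overwrite the type that was just stored. A condensed sketch of that contract, with the controller-specific polarity programming elided:

    /* Sketch: only the genirq bookkeeping is shown, hardware programming elided. */
    #include <linux/irq.h>

    static int my_irq_set_type(struct irq_data *d, unsigned int flow_type)
    {
            flow_type &= IRQ_TYPE_SENSE_MASK;

            /* ... program the controller's sense/polarity bits here ... */

            irqd_set_trigger_type(d, flow_type);
            if (flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
                    __irq_set_handler_locked(d->irq, handle_level_irq);
            else
                    __irq_set_handler_locked(d->irq, handle_edge_irq);

            return IRQ_SET_MASK_OK_NOCOPY;
    }
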
211 202
212static struct irq_chip bcm63xx_internal_irq_chip = { 203static struct irq_chip bcm63xx_internal_irq_chip = {
213 .name = "bcm63xx_ipic", 204 .name = "bcm63xx_ipic",
214 .startup = bcm63xx_internal_irq_startup, 205 .irq_mask = bcm63xx_internal_irq_mask,
215 .shutdown = bcm63xx_internal_irq_mask, 206 .irq_unmask = bcm63xx_internal_irq_unmask,
216
217 .mask = bcm63xx_internal_irq_mask,
218 .mask_ack = bcm63xx_internal_irq_mask,
219 .unmask = bcm63xx_internal_irq_unmask,
220}; 207};
221 208
222static struct irq_chip bcm63xx_external_irq_chip = { 209static struct irq_chip bcm63xx_external_irq_chip = {
223 .name = "bcm63xx_epic", 210 .name = "bcm63xx_epic",
224 .startup = bcm63xx_external_irq_startup, 211 .irq_startup = bcm63xx_external_irq_startup,
225 .shutdown = bcm63xx_external_irq_shutdown, 212 .irq_shutdown = bcm63xx_external_irq_shutdown,
226 213
227 .ack = bcm63xx_external_irq_clear, 214 .irq_ack = bcm63xx_external_irq_clear,
228 215
229 .mask = bcm63xx_external_irq_mask, 216 .irq_mask = bcm63xx_external_irq_mask,
230 .unmask = bcm63xx_external_irq_unmask, 217 .irq_unmask = bcm63xx_external_irq_unmask,
231 218
232 .set_type = bcm63xx_external_irq_set_type, 219 .irq_set_type = bcm63xx_external_irq_set_type,
233}; 220};
234 221
235static struct irqaction cpu_ip2_cascade_action = { 222static struct irqaction cpu_ip2_cascade_action = {
@@ -243,11 +230,11 @@ void __init arch_init_irq(void)
243 230
244 mips_cpu_irq_init(); 231 mips_cpu_irq_init();
245 for (i = IRQ_INTERNAL_BASE; i < NR_IRQS; ++i) 232 for (i = IRQ_INTERNAL_BASE; i < NR_IRQS; ++i)
246 set_irq_chip_and_handler(i, &bcm63xx_internal_irq_chip, 233 irq_set_chip_and_handler(i, &bcm63xx_internal_irq_chip,
247 handle_level_irq); 234 handle_level_irq);
248 235
249 for (i = IRQ_EXT_BASE; i < IRQ_EXT_BASE + 4; ++i) 236 for (i = IRQ_EXT_BASE; i < IRQ_EXT_BASE + 4; ++i)
250 set_irq_chip_and_handler(i, &bcm63xx_external_irq_chip, 237 irq_set_chip_and_handler(i, &bcm63xx_external_irq_chip,
251 handle_edge_irq); 238 handle_edge_irq);
252 239
253 setup_irq(IRQ_MIPS_BASE + 2, &cpu_ip2_cascade_action); 240 setup_irq(IRQ_MIPS_BASE + 2, &cpu_ip2_cascade_action);
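
The registration side is equally mechanical: set_irq_chip_and_handler() becomes irq_set_chip_and_handler(), the chip methods move to the irq_-prefixed fields, and the MIPS IP2 cascade is still hooked with a plain struct irqaction. A trimmed sketch of that boot-time wiring, with invented names (my_irq_chip, MY_IRQ_BASE, the dummy cascade handler) standing in for the real bcm63xx symbols:

    /* Sketch with invented names; mirrors the registration pattern above. */
    #include <linux/init.h>
    #include <linux/interrupt.h>
    #include <linux/irq.h>

    static struct irq_chip my_irq_chip = {
            .name       = "my_pic",
            .irq_mask   = my_irq_mask,
            .irq_unmask = my_irq_unmask,
    };

    static irqreturn_t my_cascade_handler(int irq, void *dev_id)
    {
            return IRQ_NONE;        /* real demux happens in plat_irq_dispatch() */
    }

    static struct irqaction my_cascade_action = {
            .handler = my_cascade_handler,
            .name    = "cascade",
    };

    void __init my_arch_init_irq(void)
    {
            int i;

            for (i = MY_IRQ_BASE; i < NR_IRQS; i++)
                    irq_set_chip_and_handler(i, &my_irq_chip, handle_level_irq);

            setup_irq(MIPS_CPU_IRQ_BASE + 2, &my_cascade_action);
    }
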
diff --git a/arch/mips/cavium-octeon/executive/octeon-model.c b/arch/mips/cavium-octeon/executive/octeon-model.c
index 9afc3794ed1..c8d35684504 100644
--- a/arch/mips/cavium-octeon/executive/octeon-model.c
+++ b/arch/mips/cavium-octeon/executive/octeon-model.c
@@ -75,7 +75,7 @@ const char *octeon_model_get_string_buffer(uint32_t chip_id, char *buffer)
75 75
76 num_cores = cvmx_octeon_num_cores(); 76 num_cores = cvmx_octeon_num_cores();
77 77
78 /* Make sure the non existant devices look disabled */ 78 /* Make sure the non existent devices look disabled */
79 switch ((chip_id >> 8) & 0xff) { 79 switch ((chip_id >> 8) & 0xff) {
80 case 6: /* CN50XX */ 80 case 6: /* CN50XX */
81 case 2: /* CN30XX */ 81 case 2: /* CN30XX */
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index ce7500cdf5b..ffd4ae660f7 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -3,10 +3,13 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (C) 2004-2008, 2009, 2010 Cavium Networks 6 * Copyright (C) 2004-2008, 2009, 2010, 2011 Cavium Networks
7 */ 7 */
8#include <linux/irq.h> 8
9#include <linux/interrupt.h> 9#include <linux/interrupt.h>
10#include <linux/bitops.h>
11#include <linux/percpu.h>
12#include <linux/irq.h>
10#include <linux/smp.h> 13#include <linux/smp.h>
11 14
12#include <asm/octeon/octeon.h> 15#include <asm/octeon/octeon.h>
@@ -14,6 +17,47 @@
14static DEFINE_RAW_SPINLOCK(octeon_irq_ciu0_lock); 17static DEFINE_RAW_SPINLOCK(octeon_irq_ciu0_lock);
15static DEFINE_RAW_SPINLOCK(octeon_irq_ciu1_lock); 18static DEFINE_RAW_SPINLOCK(octeon_irq_ciu1_lock);
16 19
20static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu0_en_mirror);
21static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu1_en_mirror);
22
23static __read_mostly u8 octeon_irq_ciu_to_irq[8][64];
24
25union octeon_ciu_chip_data {
26 void *p;
27 unsigned long l;
28 struct {
29 unsigned int line:6;
30 unsigned int bit:6;
31 } s;
32};
33
34struct octeon_core_chip_data {
35 struct mutex core_irq_mutex;
36 bool current_en;
37 bool desired_en;
38 u8 bit;
39};
40
41#define MIPS_CORE_IRQ_LINES 8
42
43static struct octeon_core_chip_data octeon_irq_core_chip_data[MIPS_CORE_IRQ_LINES];
44
45static void __init octeon_irq_set_ciu_mapping(int irq, int line, int bit,
46 struct irq_chip *chip,
47 irq_flow_handler_t handler)
48{
49 union octeon_ciu_chip_data cd;
50
51 irq_set_chip_and_handler(irq, chip, handler);
52
53 cd.l = 0;
54 cd.s.line = line;
55 cd.s.bit = bit;
56
57 irq_set_chip_data(irq, cd.p);
58 octeon_irq_ciu_to_irq[line][bit] = irq;
59}
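
octeon_irq_set_ciu_mapping() above packs the (line, bit) coordinates of a CIU source into the per-irq chip_data pointer through a union, so every callback can recover its register line and bit position with a single irq_data_get_irq_chip_data() call, while octeon_irq_ciu_to_irq[][] keeps the reverse mapping for the demux path. The pointer-packing itself is plain C and can be exercised stand-alone:

    /* Stand-alone illustration of packing two small fields into a void pointer. */
    #include <stdio.h>

    union ciu_chip_data {
            void *p;
            unsigned long l;
            struct {
                    unsigned int line:6;    /* which CIU sum/enable register */
                    unsigned int bit:6;     /* bit position within it */
            } s;
    };

    int main(void)
    {
            union ciu_chip_data cd;
            void *stored;

            cd.l = 0;               /* zero the whole word first */
            cd.s.line = 1;
            cd.s.bit = 17;
            stored = cd.p;          /* what irq_set_chip_data() would keep */

            cd.p = stored;          /* what irq_data_get_irq_chip_data() returns */
            printf("line=%u bit=%u\n", cd.s.line, cd.s.bit);
            return 0;
    }

The only requirement is that the packed fields fit into an unsigned long, which 6+6 bits comfortably do.
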
60
17static int octeon_coreid_for_cpu(int cpu) 61static int octeon_coreid_for_cpu(int cpu)
18{ 62{
19#ifdef CONFIG_SMP 63#ifdef CONFIG_SMP
@@ -23,9 +67,20 @@ static int octeon_coreid_for_cpu(int cpu)
23#endif 67#endif
24} 68}
25 69
26static void octeon_irq_core_ack(unsigned int irq) 70static int octeon_cpu_for_coreid(int coreid)
71{
72#ifdef CONFIG_SMP
73 return cpu_number_map(coreid);
74#else
75 return smp_processor_id();
76#endif
77}
78
79static void octeon_irq_core_ack(struct irq_data *data)
27{ 80{
28 unsigned int bit = irq - OCTEON_IRQ_SW0; 81 struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
82 unsigned int bit = cd->bit;
83
29 /* 84 /*
30 * We don't need to disable IRQs to make these atomic since 85 * We don't need to disable IRQs to make these atomic since
31 * they are already disabled earlier in the low level 86 * they are already disabled earlier in the low level
@@ -37,131 +92,121 @@ static void octeon_irq_core_ack(unsigned int irq)
37 clear_c0_cause(0x100 << bit); 92 clear_c0_cause(0x100 << bit);
38} 93}
39 94
40static void octeon_irq_core_eoi(unsigned int irq) 95static void octeon_irq_core_eoi(struct irq_data *data)
41{ 96{
42 struct irq_desc *desc = irq_to_desc(irq); 97 struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
43 unsigned int bit = irq - OCTEON_IRQ_SW0; 98
44 /*
45 * If an IRQ is being processed while we are disabling it the
46 * handler will attempt to unmask the interrupt after it has
47 * been disabled.
48 */
49 if ((unlikely(desc->status & IRQ_DISABLED)))
50 return;
51 /* 99 /*
52 * We don't need to disable IRQs to make these atomic since 100 * We don't need to disable IRQs to make these atomic since
53 * they are already disabled earlier in the low level 101 * they are already disabled earlier in the low level
54 * interrupt code. 102 * interrupt code.
55 */ 103 */
56 set_c0_status(0x100 << bit); 104 set_c0_status(0x100 << cd->bit);
57} 105}
58 106
59static void octeon_irq_core_enable(unsigned int irq) 107static void octeon_irq_core_set_enable_local(void *arg)
60{ 108{
61 unsigned long flags; 109 struct irq_data *data = arg;
62 unsigned int bit = irq - OCTEON_IRQ_SW0; 110 struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
111 unsigned int mask = 0x100 << cd->bit;
63 112
64 /* 113 /*
65 * We need to disable interrupts to make sure our updates are 114 * Interrupts are already disabled, so these are atomic.
66 * atomic.
67 */ 115 */
68 local_irq_save(flags); 116 if (cd->desired_en)
69 set_c0_status(0x100 << bit); 117 set_c0_status(mask);
70 local_irq_restore(flags); 118 else
119 clear_c0_status(mask);
120
71} 121}
72 122
73static void octeon_irq_core_disable_local(unsigned int irq) 123static void octeon_irq_core_disable(struct irq_data *data)
74{ 124{
75 unsigned long flags; 125 struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
76 unsigned int bit = irq - OCTEON_IRQ_SW0; 126 cd->desired_en = false;
77 /*
78 * We need to disable interrupts to make sure our updates are
79 * atomic.
80 */
81 local_irq_save(flags);
82 clear_c0_status(0x100 << bit);
83 local_irq_restore(flags);
84} 127}
85 128
86static void octeon_irq_core_disable(unsigned int irq) 129static void octeon_irq_core_enable(struct irq_data *data)
87{ 130{
88#ifdef CONFIG_SMP 131 struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
89 on_each_cpu((void (*)(void *)) octeon_irq_core_disable_local, 132 cd->desired_en = true;
90 (void *) (long) irq, 1);
91#else
92 octeon_irq_core_disable_local(irq);
93#endif
94} 133}
95 134
96static struct irq_chip octeon_irq_chip_core = { 135static void octeon_irq_core_bus_lock(struct irq_data *data)
97 .name = "Core", 136{
98 .enable = octeon_irq_core_enable, 137 struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
99 .disable = octeon_irq_core_disable,
100 .ack = octeon_irq_core_ack,
101 .eoi = octeon_irq_core_eoi,
102};
103 138
139 mutex_lock(&cd->core_irq_mutex);
140}
104 141
105static void octeon_irq_ciu0_ack(unsigned int irq) 142static void octeon_irq_core_bus_sync_unlock(struct irq_data *data)
106{ 143{
107 switch (irq) { 144 struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
108 case OCTEON_IRQ_GMX_DRP0: 145
109 case OCTEON_IRQ_GMX_DRP1: 146 if (cd->desired_en != cd->current_en) {
110 case OCTEON_IRQ_IPD_DRP: 147 on_each_cpu(octeon_irq_core_set_enable_local, data, 1);
111 case OCTEON_IRQ_KEY_ZERO: 148
112 case OCTEON_IRQ_TIMER0: 149 cd->current_en = cd->desired_en;
113 case OCTEON_IRQ_TIMER1:
114 case OCTEON_IRQ_TIMER2:
115 case OCTEON_IRQ_TIMER3:
116 {
117 int index = cvmx_get_core_num() * 2;
118 u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
119 /*
120 * CIU timer type interrupts must be acknowledged by
121 * writing a '1' bit to their sum0 bit.
122 */
123 cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
124 break;
125 }
126 default:
127 break;
128 } 150 }
129 151
130 /* 152 mutex_unlock(&cd->core_irq_mutex);
131 * In order to avoid any locking accessing the CIU, we
132 * acknowledge CIU interrupts by disabling all of them. This
133 * way we can use a per core register and avoid any out of
134 * core locking requirements. This has the side effect that
135 * CIU interrupts can't be processed recursively.
136 *
137 * We don't need to disable IRQs to make these atomic since
138 * they are already disabled earlier in the low level
139 * interrupt code.
140 */
141 clear_c0_status(0x100 << 2);
142} 153}
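
The irq_bus_lock/irq_bus_sync_unlock pair added for the core chip is the genirq "slow bus" hook: irq_enable/irq_disable merely record the desired state, and the expensive on_each_cpu() broadcast runs once in the sync hook, outside the raw descriptor lock, where sleeping in mutex_lock() is allowed. A reduced sketch of the pattern, assuming an illustrative my_chip_data structure and a my_apply_enable() IPI callback like the ones above:

    /* Sketch of the bus-lock pattern used above; names are illustrative. */
    #include <linux/irq.h>
    #include <linux/mutex.h>
    #include <linux/smp.h>

    struct my_chip_data {
            struct mutex lock;
            bool         current_en;
            bool         desired_en;
    };

    static void my_apply_enable(void *arg);         /* IPI callback, as above */

    static void my_irq_bus_lock(struct irq_data *d)
    {
            struct my_chip_data *cd = irq_data_get_irq_chip_data(d);

            mutex_lock(&cd->lock);          /* may sleep: called outside desc->lock */
    }

    static void my_irq_bus_sync_unlock(struct irq_data *d)
    {
            struct my_chip_data *cd = irq_data_get_irq_chip_data(d);

            if (cd->desired_en != cd->current_en) {
                    on_each_cpu(my_apply_enable, d, 1);     /* push change to every core */
                    cd->current_en = cd->desired_en;
            }
            mutex_unlock(&cd->lock);
    }
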
143 154
144static void octeon_irq_ciu0_eoi(unsigned int irq) 155static struct irq_chip octeon_irq_chip_core = {
156 .name = "Core",
157 .irq_enable = octeon_irq_core_enable,
158 .irq_disable = octeon_irq_core_disable,
159 .irq_ack = octeon_irq_core_ack,
160 .irq_eoi = octeon_irq_core_eoi,
161 .irq_bus_lock = octeon_irq_core_bus_lock,
162 .irq_bus_sync_unlock = octeon_irq_core_bus_sync_unlock,
163
164 .irq_cpu_online = octeon_irq_core_eoi,
165 .irq_cpu_offline = octeon_irq_core_ack,
166 .flags = IRQCHIP_ONOFFLINE_ENABLED,
167};
168
169static void __init octeon_irq_init_core(void)
145{ 170{
146 /* 171 int i;
147 * Enable all CIU interrupts again. We don't need to disable 172 int irq;
148 * IRQs to make these atomic since they are already disabled 173 struct octeon_core_chip_data *cd;
149 * earlier in the low level interrupt code. 174
150 */ 175 for (i = 0; i < MIPS_CORE_IRQ_LINES; i++) {
151 set_c0_status(0x100 << 2); 176 cd = &octeon_irq_core_chip_data[i];
177 cd->current_en = false;
178 cd->desired_en = false;
179 cd->bit = i;
180 mutex_init(&cd->core_irq_mutex);
181
182 irq = OCTEON_IRQ_SW0 + i;
183 switch (irq) {
184 case OCTEON_IRQ_TIMER:
185 case OCTEON_IRQ_SW0:
186 case OCTEON_IRQ_SW1:
187 case OCTEON_IRQ_5:
188 case OCTEON_IRQ_PERF:
189 irq_set_chip_data(irq, cd);
190 irq_set_chip_and_handler(irq, &octeon_irq_chip_core,
191 handle_percpu_irq);
192 break;
193 default:
194 break;
195 }
196 }
152} 197}
153 198
154static int next_coreid_for_irq(struct irq_desc *desc) 199static int next_cpu_for_irq(struct irq_data *data)
155{ 200{
156 201
157#ifdef CONFIG_SMP 202#ifdef CONFIG_SMP
158 int coreid; 203 int cpu;
159 int weight = cpumask_weight(desc->affinity); 204 int weight = cpumask_weight(data->affinity);
160 205
161 if (weight > 1) { 206 if (weight > 1) {
162 int cpu = smp_processor_id(); 207 cpu = smp_processor_id();
163 for (;;) { 208 for (;;) {
164 cpu = cpumask_next(cpu, desc->affinity); 209 cpu = cpumask_next(cpu, data->affinity);
165 if (cpu >= nr_cpu_ids) { 210 if (cpu >= nr_cpu_ids) {
166 cpu = -1; 211 cpu = -1;
167 continue; 212 continue;
@@ -169,83 +214,175 @@ static int next_coreid_for_irq(struct irq_desc *desc)
169 break; 214 break;
170 } 215 }
171 } 216 }
172 coreid = octeon_coreid_for_cpu(cpu);
173 } else if (weight == 1) { 217 } else if (weight == 1) {
174 coreid = octeon_coreid_for_cpu(cpumask_first(desc->affinity)); 218 cpu = cpumask_first(data->affinity);
175 } else { 219 } else {
176 coreid = cvmx_get_core_num(); 220 cpu = smp_processor_id();
177 } 221 }
178 return coreid; 222 return cpu;
179#else 223#else
180 return cvmx_get_core_num(); 224 return smp_processor_id();
181#endif 225#endif
182} 226}
183 227
184static void octeon_irq_ciu0_enable(unsigned int irq) 228static void octeon_irq_ciu_enable(struct irq_data *data)
185{ 229{
186 struct irq_desc *desc = irq_to_desc(irq); 230 int cpu = next_cpu_for_irq(data);
187 int coreid = next_coreid_for_irq(desc); 231 int coreid = octeon_coreid_for_cpu(cpu);
232 unsigned long *pen;
188 unsigned long flags; 233 unsigned long flags;
189 uint64_t en0; 234 union octeon_ciu_chip_data cd;
190 int bit = irq - OCTEON_IRQ_WORKQ0; /* Bit 0-63 of EN0 */ 235
236 cd.p = irq_data_get_irq_chip_data(data);
191 237
192 raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags); 238 if (cd.s.line == 0) {
193 en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)); 239 raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
194 en0 |= 1ull << bit; 240 pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
195 cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0); 241 set_bit(cd.s.bit, pen);
196 cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)); 242 cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
197 raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags); 243 raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
244 } else {
245 raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
246 pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
247 set_bit(cd.s.bit, pen);
248 cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
249 raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
250 }
198} 251}
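
octeon_irq_ciu_enable() above introduces the other recurring idea in this rewrite: a per-CPU software mirror of each enable register (octeon_irq_ciu0_en_mirror / octeon_irq_ciu1_en_mirror). The bit is flipped in the mirror and the whole word is written out, so the CSR never has to be read back on the hot path. Stripped of the locking and the line-0/line-1 split, the enable step has roughly this shape (my_en_mirror is an illustrative stand-in):

    /* Shape of the mirror-based enable, heavily trimmed; not the exact driver code. */
    #include <linux/bitops.h>
    #include <linux/percpu.h>
    #include <asm/octeon/octeon.h>

    static DEFINE_PER_CPU(unsigned long, my_en_mirror);

    static void my_ciu_enable_bit(int cpu, int coreid, int bit)
    {
            unsigned long *pen = &per_cpu(my_en_mirror, cpu);

            set_bit(bit, pen);                                      /* update the software copy */
            cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);    /* push it to hardware */
    }
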
199 252
200static void octeon_irq_ciu0_enable_mbox(unsigned int irq) 253static void octeon_irq_ciu_enable_local(struct irq_data *data)
201{ 254{
202 int coreid = cvmx_get_core_num(); 255 unsigned long *pen;
256 unsigned long flags;
257 union octeon_ciu_chip_data cd;
258
259 cd.p = irq_data_get_irq_chip_data(data);
260
261 if (cd.s.line == 0) {
262 raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
263 pen = &__get_cpu_var(octeon_irq_ciu0_en_mirror);
264 set_bit(cd.s.bit, pen);
265 cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
266 raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
267 } else {
268 raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
269 pen = &__get_cpu_var(octeon_irq_ciu1_en_mirror);
270 set_bit(cd.s.bit, pen);
271 cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen);
272 raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
273 }
274}
275
276static void octeon_irq_ciu_disable_local(struct irq_data *data)
277{
278 unsigned long *pen;
203 unsigned long flags; 279 unsigned long flags;
204 uint64_t en0; 280 union octeon_ciu_chip_data cd;
205 int bit = irq - OCTEON_IRQ_WORKQ0; /* Bit 0-63 of EN0 */ 281
282 cd.p = irq_data_get_irq_chip_data(data);
206 283
207 raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags); 284 if (cd.s.line == 0) {
208 en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)); 285 raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
209 en0 |= 1ull << bit; 286 pen = &__get_cpu_var(octeon_irq_ciu0_en_mirror);
210 cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0); 287 clear_bit(cd.s.bit, pen);
211 cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)); 288 cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
212 raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags); 289 raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
290 } else {
291 raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
292 pen = &__get_cpu_var(octeon_irq_ciu1_en_mirror);
293 clear_bit(cd.s.bit, pen);
294 cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen);
295 raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
296 }
213} 297}
214 298
215static void octeon_irq_ciu0_disable(unsigned int irq) 299static void octeon_irq_ciu_disable_all(struct irq_data *data)
216{ 300{
217 int bit = irq - OCTEON_IRQ_WORKQ0; /* Bit 0-63 of EN0 */
218 unsigned long flags; 301 unsigned long flags;
219 uint64_t en0; 302 unsigned long *pen;
220 int cpu; 303 int cpu;
221 raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags); 304 union octeon_ciu_chip_data cd;
222 for_each_online_cpu(cpu) { 305
223 int coreid = octeon_coreid_for_cpu(cpu); 306 wmb(); /* Make sure flag changes arrive before register updates. */
224 en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)); 307
225 en0 &= ~(1ull << bit); 308 cd.p = irq_data_get_irq_chip_data(data);
226 cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0); 309
310 if (cd.s.line == 0) {
311 raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
312 for_each_online_cpu(cpu) {
313 int coreid = octeon_coreid_for_cpu(cpu);
314 pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
315 clear_bit(cd.s.bit, pen);
316 cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
317 }
318 raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
319 } else {
320 raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
321 for_each_online_cpu(cpu) {
322 int coreid = octeon_coreid_for_cpu(cpu);
323 pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
324 clear_bit(cd.s.bit, pen);
325 cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
326 }
327 raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
328 }
329}
330
331static void octeon_irq_ciu_enable_all(struct irq_data *data)
332{
333 unsigned long flags;
334 unsigned long *pen;
335 int cpu;
336 union octeon_ciu_chip_data cd;
337
338 cd.p = irq_data_get_irq_chip_data(data);
339
340 if (cd.s.line == 0) {
341 raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
342 for_each_online_cpu(cpu) {
343 int coreid = octeon_coreid_for_cpu(cpu);
344 pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
345 set_bit(cd.s.bit, pen);
346 cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
347 }
348 raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
349 } else {
350 raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
351 for_each_online_cpu(cpu) {
352 int coreid = octeon_coreid_for_cpu(cpu);
353 pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
354 set_bit(cd.s.bit, pen);
355 cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
356 }
357 raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
227 } 358 }
228 /*
229 * We need to do a read after the last update to make sure all
230 * of them are done.
231 */
232 cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
233 raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
234} 359}
235 360
236/* 361/*
237 * Enable the irq on the next core in the affinity set for chips that 362 * Enable the irq on the next core in the affinity set for chips that
238 * have the EN*_W1{S,C} registers. 363 * have the EN*_W1{S,C} registers.
239 */ 364 */
240static void octeon_irq_ciu0_enable_v2(unsigned int irq) 365static void octeon_irq_ciu_enable_v2(struct irq_data *data)
241{ 366{
242 int index; 367 u64 mask;
243 u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0); 368 int cpu = next_cpu_for_irq(data);
244 struct irq_desc *desc = irq_to_desc(irq); 369 union octeon_ciu_chip_data cd;
370
371 cd.p = irq_data_get_irq_chip_data(data);
372 mask = 1ull << (cd.s.bit);
245 373
246 if ((desc->status & IRQ_DISABLED) == 0) { 374 /*
247 index = next_coreid_for_irq(desc) * 2; 375 * Called under the desc lock, so these should never get out
376 * of sync.
377 */
378 if (cd.s.line == 0) {
379 int index = octeon_coreid_for_cpu(cpu) * 2;
380 set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
248 cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); 381 cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
382 } else {
383 int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
384 set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
385 cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
249 } 386 }
250} 387}
251 388
@@ -253,83 +390,155 @@ static void octeon_irq_ciu0_enable_v2(unsigned int irq)
253 * Enable the irq on the current CPU for chips that 390 * Enable the irq on the current CPU for chips that
254 * have the EN*_W1{S,C} registers. 391 * have the EN*_W1{S,C} registers.
255 */ 392 */
256static void octeon_irq_ciu0_enable_mbox_v2(unsigned int irq) 393static void octeon_irq_ciu_enable_local_v2(struct irq_data *data)
394{
395 u64 mask;
396 union octeon_ciu_chip_data cd;
397
398 cd.p = irq_data_get_irq_chip_data(data);
399 mask = 1ull << (cd.s.bit);
400
401 if (cd.s.line == 0) {
402 int index = cvmx_get_core_num() * 2;
403 set_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu0_en_mirror));
404 cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
405 } else {
406 int index = cvmx_get_core_num() * 2 + 1;
407 set_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu1_en_mirror));
408 cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
409 }
410}
411
412static void octeon_irq_ciu_disable_local_v2(struct irq_data *data)
257{ 413{
258 int index; 414 u64 mask;
259 u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0); 415 union octeon_ciu_chip_data cd;
260 416
261 index = cvmx_get_core_num() * 2; 417 cd.p = irq_data_get_irq_chip_data(data);
262 cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); 418 mask = 1ull << (cd.s.bit);
419
420 if (cd.s.line == 0) {
421 int index = cvmx_get_core_num() * 2;
422 clear_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu0_en_mirror));
423 cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
424 } else {
425 int index = cvmx_get_core_num() * 2 + 1;
426 clear_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu1_en_mirror));
427 cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
428 }
263} 429}
264 430
265/* 431/*
266 * Disable the irq on the current core for chips that have the EN*_W1{S,C} 432 * Write to the W1C bit in CVMX_CIU_INTX_SUM0 to clear the irq.
267 * registers.
268 */ 433 */
269static void octeon_irq_ciu0_ack_v2(unsigned int irq) 434static void octeon_irq_ciu_ack(struct irq_data *data)
270{ 435{
271 int index = cvmx_get_core_num() * 2; 436 u64 mask;
272 u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0); 437 union octeon_ciu_chip_data cd;
273 438
274 switch (irq) { 439 cd.p = data->chip_data;
275 case OCTEON_IRQ_GMX_DRP0: 440 mask = 1ull << (cd.s.bit);
276 case OCTEON_IRQ_GMX_DRP1: 441
277 case OCTEON_IRQ_IPD_DRP: 442 if (cd.s.line == 0) {
278 case OCTEON_IRQ_KEY_ZERO: 443 int index = cvmx_get_core_num() * 2;
279 case OCTEON_IRQ_TIMER0:
280 case OCTEON_IRQ_TIMER1:
281 case OCTEON_IRQ_TIMER2:
282 case OCTEON_IRQ_TIMER3:
283 /*
284 * CIU timer type interrupts must be acknowledged by
285 * writing a '1' bit to their sum0 bit.
286 */
287 cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask); 444 cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
288 break; 445 } else {
289 default: 446 cvmx_write_csr(CVMX_CIU_INT_SUM1, mask);
290 break;
291 } 447 }
292
293 cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
294} 448}
295 449
296/* 450/*
297 * Enable the irq on the current core for chips that have the EN*_W1{S,C} 451 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
298 * registers. 452 * registers.
299 */ 453 */
300static void octeon_irq_ciu0_eoi_mbox_v2(unsigned int irq) 454static void octeon_irq_ciu_disable_all_v2(struct irq_data *data)
301{ 455{
302 struct irq_desc *desc = irq_to_desc(irq); 456 int cpu;
303 int index = cvmx_get_core_num() * 2; 457 u64 mask;
304 u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0); 458 union octeon_ciu_chip_data cd;
305 459
306 if (likely((desc->status & IRQ_DISABLED) == 0)) 460 wmb(); /* Make sure flag changes arrive before register updates. */
307 cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); 461
462 cd.p = data->chip_data;
463 mask = 1ull << (cd.s.bit);
464
465 if (cd.s.line == 0) {
466 for_each_online_cpu(cpu) {
467 int index = octeon_coreid_for_cpu(cpu) * 2;
468 clear_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
469 cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
470 }
471 } else {
472 for_each_online_cpu(cpu) {
473 int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
474 clear_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
475 cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
476 }
477 }
308} 478}
309 479
310/* 480/*
311 * Disable the irq on all cores for chips that have the EN*_W1{S,C} 481 * Enable the irq on all cores for chips that have the EN*_W1{S,C}
312 * registers. 482 * registers.
313 */ 483 */
314static void octeon_irq_ciu0_disable_all_v2(unsigned int irq) 484static void octeon_irq_ciu_enable_all_v2(struct irq_data *data)
315{ 485{
316 u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
317 int index;
318 int cpu; 486 int cpu;
319 for_each_online_cpu(cpu) { 487 u64 mask;
320 index = octeon_coreid_for_cpu(cpu) * 2; 488 union octeon_ciu_chip_data cd;
321 cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); 489
490 cd.p = data->chip_data;
491 mask = 1ull << (cd.s.bit);
492
493 if (cd.s.line == 0) {
494 for_each_online_cpu(cpu) {
495 int index = octeon_coreid_for_cpu(cpu) * 2;
496 set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
497 cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
498 }
499 } else {
500 for_each_online_cpu(cpu) {
501 int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
502 set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
503 cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
504 }
322 } 505 }
323} 506}
324 507
325#ifdef CONFIG_SMP 508#ifdef CONFIG_SMP
326static int octeon_irq_ciu0_set_affinity(unsigned int irq, const struct cpumask *dest) 509
510static void octeon_irq_cpu_offline_ciu(struct irq_data *data)
511{
512 int cpu = smp_processor_id();
513 cpumask_t new_affinity;
514
515 if (!cpumask_test_cpu(cpu, data->affinity))
516 return;
517
518 if (cpumask_weight(data->affinity) > 1) {
519 /*
520 * It has multi CPU affinity, just remove this CPU
521 * from the affinity set.
522 */
523 cpumask_copy(&new_affinity, data->affinity);
524 cpumask_clear_cpu(cpu, &new_affinity);
525 } else {
526 /* Otherwise, put it on lowest numbered online CPU. */
527 cpumask_clear(&new_affinity);
528 cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
529 }
530 __irq_set_affinity_locked(data, &new_affinity);
531}
532
533static int octeon_irq_ciu_set_affinity(struct irq_data *data,
534 const struct cpumask *dest, bool force)
327{ 535{
328 int cpu; 536 int cpu;
329 struct irq_desc *desc = irq_to_desc(irq); 537 bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
330 int enable_one = (desc->status & IRQ_DISABLED) == 0;
331 unsigned long flags; 538 unsigned long flags;
332 int bit = irq - OCTEON_IRQ_WORKQ0; /* Bit 0-63 of EN0 */ 539 union octeon_ciu_chip_data cd;
540
541 cd.p = data->chip_data;
333 542
334 /* 543 /*
335 * For non-v2 CIU, we will allow only single CPU affinity. 544 * For non-v2 CIU, we will allow only single CPU affinity.
@@ -339,26 +548,40 @@ static int octeon_irq_ciu0_set_affinity(unsigned int irq, const struct cpumask *
339 if (cpumask_weight(dest) != 1) 548 if (cpumask_weight(dest) != 1)
340 return -EINVAL; 549 return -EINVAL;
341 550
342 raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags); 551 if (!enable_one)
343 for_each_online_cpu(cpu) { 552 return 0;
344 int coreid = octeon_coreid_for_cpu(cpu); 553
345 uint64_t en0 = 554 if (cd.s.line == 0) {
346 cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)); 555 raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
347 if (cpumask_test_cpu(cpu, dest) && enable_one) { 556 for_each_online_cpu(cpu) {
348 enable_one = 0; 557 int coreid = octeon_coreid_for_cpu(cpu);
349 en0 |= 1ull << bit; 558 unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
350 } else { 559
351 en0 &= ~(1ull << bit); 560 if (cpumask_test_cpu(cpu, dest) && enable_one) {
561 enable_one = false;
562 set_bit(cd.s.bit, pen);
563 } else {
564 clear_bit(cd.s.bit, pen);
565 }
566 cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
352 } 567 }
353 cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0); 568 raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
569 } else {
570 raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
571 for_each_online_cpu(cpu) {
572 int coreid = octeon_coreid_for_cpu(cpu);
573 unsigned long *pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
574
575 if (cpumask_test_cpu(cpu, dest) && enable_one) {
576 enable_one = false;
577 set_bit(cd.s.bit, pen);
578 } else {
579 clear_bit(cd.s.bit, pen);
580 }
581 cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
582 }
583 raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
354 } 584 }
355 /*
356 * We need to do a read after the last update to make sure all
357 * of them are done.
358 */
359 cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
360 raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
361
362 return 0; 585 return 0;
363} 586}
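
For the non-v2 CIU, octeon_irq_ciu_set_affinity() keeps the old rule that exactly one CPU in the destination mask gets the enable bit (so ack/eoi stay lock-free), and it now bails out early when the interrupt is masked or disabled instead of consulting desc->status. The "first matching CPU wins" loop looks like this in isolation (my_en_mirror is the illustrative per-CPU mirror from the earlier sketch):

    /* Sketch of the "enable on exactly one CPU of the mask" loop used above. */
    #include <linux/bitops.h>
    #include <linux/cpumask.h>
    #include <linux/percpu.h>

    static void my_apply_affinity(const struct cpumask *dest, int bit)
    {
            bool enable_one = true;
            int cpu;

            for_each_online_cpu(cpu) {
                    unsigned long *pen = &per_cpu(my_en_mirror, cpu);

                    if (cpumask_test_cpu(cpu, dest) && enable_one) {
                            enable_one = false;     /* first CPU in the mask wins */
                            set_bit(bit, pen);
                    } else {
                            clear_bit(bit, pen);
                    }
                    /* write *pen to that CPU's enable register here */
            }
    }
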
364 587
@@ -366,22 +589,46 @@ static int octeon_irq_ciu0_set_affinity(unsigned int irq, const struct cpumask *
366 * Set affinity for the irq for chips that have the EN*_W1{S,C} 589 * Set affinity for the irq for chips that have the EN*_W1{S,C}
367 * registers. 590 * registers.
368 */ 591 */
369static int octeon_irq_ciu0_set_affinity_v2(unsigned int irq, 592static int octeon_irq_ciu_set_affinity_v2(struct irq_data *data,
370 const struct cpumask *dest) 593 const struct cpumask *dest,
594 bool force)
371{ 595{
372 int cpu; 596 int cpu;
373 int index; 597 bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
374 struct irq_desc *desc = irq_to_desc(irq); 598 u64 mask;
375 int enable_one = (desc->status & IRQ_DISABLED) == 0; 599 union octeon_ciu_chip_data cd;
376 u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0); 600
377 601 if (!enable_one)
378 for_each_online_cpu(cpu) { 602 return 0;
379 index = octeon_coreid_for_cpu(cpu) * 2; 603
380 if (cpumask_test_cpu(cpu, dest) && enable_one) { 604 cd.p = data->chip_data;
381 enable_one = 0; 605 mask = 1ull << cd.s.bit;
382 cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); 606
383 } else { 607 if (cd.s.line == 0) {
384 cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); 608 for_each_online_cpu(cpu) {
609 unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
610 int index = octeon_coreid_for_cpu(cpu) * 2;
611 if (cpumask_test_cpu(cpu, dest) && enable_one) {
612 enable_one = false;
613 set_bit(cd.s.bit, pen);
614 cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
615 } else {
616 clear_bit(cd.s.bit, pen);
617 cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
618 }
619 }
620 } else {
621 for_each_online_cpu(cpu) {
622 unsigned long *pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
623 int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
624 if (cpumask_test_cpu(cpu, dest) && enable_one) {
625 enable_one = false;
626 set_bit(cd.s.bit, pen);
627 cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
628 } else {
629 clear_bit(cd.s.bit, pen);
630 cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
631 }
385 } 632 }
386 } 633 }
387 return 0; 634 return 0;
@@ -389,80 +636,102 @@ static int octeon_irq_ciu0_set_affinity_v2(unsigned int irq,
389#endif 636#endif
390 637
391/* 638/*
639 * The v1 CIU code already masks things, so supply a dummy version to
640 * the core chip code.
641 */
642static void octeon_irq_dummy_mask(struct irq_data *data)
643{
644}
645
646/*
392 * Newer octeon chips have support for lockless CIU operation. 647 * Newer octeon chips have support for lockless CIU operation.
393 */ 648 */
394static struct irq_chip octeon_irq_chip_ciu0_v2 = { 649static struct irq_chip octeon_irq_chip_ciu_v2 = {
395 .name = "CIU0", 650 .name = "CIU",
396 .enable = octeon_irq_ciu0_enable_v2, 651 .irq_enable = octeon_irq_ciu_enable_v2,
397 .disable = octeon_irq_ciu0_disable_all_v2, 652 .irq_disable = octeon_irq_ciu_disable_all_v2,
398 .eoi = octeon_irq_ciu0_enable_v2, 653 .irq_mask = octeon_irq_ciu_disable_local_v2,
654 .irq_unmask = octeon_irq_ciu_enable_v2,
399#ifdef CONFIG_SMP 655#ifdef CONFIG_SMP
400 .set_affinity = octeon_irq_ciu0_set_affinity_v2, 656 .irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
657 .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
401#endif 658#endif
402}; 659};
403 660
404static struct irq_chip octeon_irq_chip_ciu0 = { 661static struct irq_chip octeon_irq_chip_ciu_edge_v2 = {
405 .name = "CIU0", 662 .name = "CIU-E",
406 .enable = octeon_irq_ciu0_enable, 663 .irq_enable = octeon_irq_ciu_enable_v2,
407 .disable = octeon_irq_ciu0_disable, 664 .irq_disable = octeon_irq_ciu_disable_all_v2,
408 .eoi = octeon_irq_ciu0_eoi, 665 .irq_ack = octeon_irq_ciu_ack,
666 .irq_mask = octeon_irq_ciu_disable_local_v2,
667 .irq_unmask = octeon_irq_ciu_enable_v2,
409#ifdef CONFIG_SMP 668#ifdef CONFIG_SMP
410 .set_affinity = octeon_irq_ciu0_set_affinity, 669 .irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
670 .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
411#endif 671#endif
412}; 672};
413 673
414/* The mbox versions don't do any affinity or round-robin. */ 674static struct irq_chip octeon_irq_chip_ciu = {
415static struct irq_chip octeon_irq_chip_ciu0_mbox_v2 = { 675 .name = "CIU",
416 .name = "CIU0-M", 676 .irq_enable = octeon_irq_ciu_enable,
417 .enable = octeon_irq_ciu0_enable_mbox_v2, 677 .irq_disable = octeon_irq_ciu_disable_all,
418 .disable = octeon_irq_ciu0_disable, 678 .irq_mask = octeon_irq_dummy_mask,
419 .eoi = octeon_irq_ciu0_eoi_mbox_v2, 679#ifdef CONFIG_SMP
680 .irq_set_affinity = octeon_irq_ciu_set_affinity,
681 .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
682#endif
420}; 683};
421 684
422static struct irq_chip octeon_irq_chip_ciu0_mbox = { 685static struct irq_chip octeon_irq_chip_ciu_edge = {
423 .name = "CIU0-M", 686 .name = "CIU-E",
424 .enable = octeon_irq_ciu0_enable_mbox, 687 .irq_enable = octeon_irq_ciu_enable,
425 .disable = octeon_irq_ciu0_disable, 688 .irq_disable = octeon_irq_ciu_disable_all,
426 .eoi = octeon_irq_ciu0_eoi, 689 .irq_mask = octeon_irq_dummy_mask,
690 .irq_ack = octeon_irq_ciu_ack,
691#ifdef CONFIG_SMP
692 .irq_set_affinity = octeon_irq_ciu_set_affinity,
693 .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
694#endif
427}; 695};
428 696
429static void octeon_irq_ciu1_ack(unsigned int irq) 697/* The mbox versions don't do any affinity or round-robin. */
430{ 698static struct irq_chip octeon_irq_chip_ciu_mbox_v2 = {
431 /* 699 .name = "CIU-M",
432 * In order to avoid any locking accessing the CIU, we 700 .irq_enable = octeon_irq_ciu_enable_all_v2,
433 * acknowledge CIU interrupts by disabling all of them. This 701 .irq_disable = octeon_irq_ciu_disable_all_v2,
434 * way we can use a per core register and avoid any out of 702 .irq_ack = octeon_irq_ciu_disable_local_v2,
435 * core locking requirements. This has the side effect that 703 .irq_eoi = octeon_irq_ciu_enable_local_v2,
436 * CIU interrupts can't be processed recursively. We don't 704
437 * need to disable IRQs to make these atomic since they are 705 .irq_cpu_online = octeon_irq_ciu_enable_local_v2,
438 * already disabled earlier in the low level interrupt code. 706 .irq_cpu_offline = octeon_irq_ciu_disable_local_v2,
439 */ 707 .flags = IRQCHIP_ONOFFLINE_ENABLED,
440 clear_c0_status(0x100 << 3); 708};
441}
442 709
443static void octeon_irq_ciu1_eoi(unsigned int irq) 710static struct irq_chip octeon_irq_chip_ciu_mbox = {
444{ 711 .name = "CIU-M",
445 /* 712 .irq_enable = octeon_irq_ciu_enable_all,
446 * Enable all CIU interrupts again. We don't need to disable 713 .irq_disable = octeon_irq_ciu_disable_all,
447 * IRQs to make these atomic since they are already disabled 714
448 * earlier in the low level interrupt code. 715 .irq_cpu_online = octeon_irq_ciu_enable_local,
449 */ 716 .irq_cpu_offline = octeon_irq_ciu_disable_local,
450 set_c0_status(0x100 << 3); 717 .flags = IRQCHIP_ONOFFLINE_ENABLED,
451} 718};
452 719
453static void octeon_irq_ciu1_enable(unsigned int irq) 720/*
721 * Watchdog interrupts are special. They are associated with a single
722 * core, so we hardwire the affinity to that core.
723 */
724static void octeon_irq_ciu_wd_enable(struct irq_data *data)
454{ 725{
455 struct irq_desc *desc = irq_to_desc(irq);
456 int coreid = next_coreid_for_irq(desc);
457 unsigned long flags; 726 unsigned long flags;
458 uint64_t en1; 727 unsigned long *pen;
459 int bit = irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */ 728 int coreid = data->irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */
729 int cpu = octeon_cpu_for_coreid(coreid);
460 730
461 raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags); 731 raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
462 en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)); 732 pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
463 en1 |= 1ull << bit; 733 set_bit(coreid, pen);
464 cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1); 734 cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
465 cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
466 raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags); 735 raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
467} 736}
468 737
@@ -470,286 +739,281 @@ static void octeon_irq_ciu1_enable(unsigned int irq)
470 * Watchdog interrupts are special. They are associated with a single 739 * Watchdog interrupts are special. They are associated with a single
471 * core, so we hardwire the affinity to that core. 740 * core, so we hardwire the affinity to that core.
472 */ 741 */
473static void octeon_irq_ciu1_wd_enable(unsigned int irq) 742static void octeon_irq_ciu1_wd_enable_v2(struct irq_data *data)
474{ 743{
475 unsigned long flags; 744 int coreid = data->irq - OCTEON_IRQ_WDOG0;
476 uint64_t en1; 745 int cpu = octeon_cpu_for_coreid(coreid);
477 int bit = irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */
478 int coreid = bit;
479 746
480 raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags); 747 set_bit(coreid, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
481 en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)); 748 cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(coreid * 2 + 1), 1ull << coreid);
482 en1 |= 1ull << bit;
483 cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
484 cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
485 raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
486} 749}
487 750
488static void octeon_irq_ciu1_disable(unsigned int irq) 751
752static struct irq_chip octeon_irq_chip_ciu_wd_v2 = {
753 .name = "CIU-W",
754 .irq_enable = octeon_irq_ciu1_wd_enable_v2,
755 .irq_disable = octeon_irq_ciu_disable_all_v2,
756 .irq_mask = octeon_irq_ciu_disable_local_v2,
757 .irq_unmask = octeon_irq_ciu_enable_local_v2,
758};
759
760static struct irq_chip octeon_irq_chip_ciu_wd = {
761 .name = "CIU-W",
762 .irq_enable = octeon_irq_ciu_wd_enable,
763 .irq_disable = octeon_irq_ciu_disable_all,
764 .irq_mask = octeon_irq_dummy_mask,
765};
766
767static void octeon_irq_ip2_v1(void)
489{ 768{
490 int bit = irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */ 769 const unsigned long core_id = cvmx_get_core_num();
491 unsigned long flags; 770 u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INTX_SUM0(core_id * 2));
492 uint64_t en1; 771
493 int cpu; 772 ciu_sum &= __get_cpu_var(octeon_irq_ciu0_en_mirror);
494 raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags); 773 clear_c0_status(STATUSF_IP2);
495 for_each_online_cpu(cpu) { 774 if (likely(ciu_sum)) {
496 int coreid = octeon_coreid_for_cpu(cpu); 775 int bit = fls64(ciu_sum) - 1;
497 en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)); 776 int irq = octeon_irq_ciu_to_irq[0][bit];
498 en1 &= ~(1ull << bit); 777 if (likely(irq))
499 cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1); 778 do_IRQ(irq);
779 else
780 spurious_interrupt();
781 } else {
782 spurious_interrupt();
500 } 783 }
501 /* 784 set_c0_status(STATUSF_IP2);
502 * We need to do a read after the last update to make sure all
503 * of them are done.
504 */
505 cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
506 raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
507} 785}
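
With octeon_irq_ciu_to_irq[][] filled in at mapping time, the new IP2/IP3 demux routines no longer assume a contiguous irq numbering: they AND the CIU sum register with this CPU's enable mirror, take the highest pending bit with fls64(), and look up the Linux irq in the table, falling back to spurious_interrupt() for unmapped bits. The core of that demux is easy to model stand-alone:

    /* Stand-alone model of the sum & enable -> fls64 -> irq-table demux above. */
    #include <stdio.h>
    #include <stdint.h>

    static int my_ciu_to_irq[64];                   /* filled in at "init" time */

    static int fls64_model(uint64_t x)              /* 1-based index of highest set bit, 0 if none */
    {
            int i;
            for (i = 63; i >= 0; i--)
                    if (x & (1ull << i))
                            return i + 1;
            return 0;
    }

    int main(void)
    {
            uint64_t sum = (1ull << 34) | (1ull << 3);      /* pending sources */
            uint64_t en  = (1ull << 34);                    /* only bit 34 enabled here */
            uint64_t pending = sum & en;

            my_ciu_to_irq[34] = 42;         /* pretend bit 34 maps to Linux irq 42 */

            if (pending) {
                    int bit = fls64_model(pending) - 1;
                    printf("dispatch irq %d\n", my_ciu_to_irq[bit]);        /* -> 42 */
            } else {
                    printf("spurious\n");
            }
            return 0;
    }
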
508 786
509/* 787static void octeon_irq_ip2_v2(void)
510 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
511 * registers.
512 */
513static void octeon_irq_ciu1_enable_v2(unsigned int irq)
514{ 788{
515 int index; 789 const unsigned long core_id = cvmx_get_core_num();
516 u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0); 790 u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INTX_SUM0(core_id * 2));
517 struct irq_desc *desc = irq_to_desc(irq); 791
518 792 ciu_sum &= __get_cpu_var(octeon_irq_ciu0_en_mirror);
519 if ((desc->status & IRQ_DISABLED) == 0) { 793 if (likely(ciu_sum)) {
520 index = next_coreid_for_irq(desc) * 2 + 1; 794 int bit = fls64(ciu_sum) - 1;
521 cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); 795 int irq = octeon_irq_ciu_to_irq[0][bit];
796 if (likely(irq))
797 do_IRQ(irq);
798 else
799 spurious_interrupt();
800 } else {
801 spurious_interrupt();
522 } 802 }
523} 803}
524 804static void octeon_irq_ip3_v1(void)
525/*
526 * Watchdog interrupts are special. They are associated with a single
527 * core, so we hardwire the affinity to that core.
528 */
529static void octeon_irq_ciu1_wd_enable_v2(unsigned int irq)
530{ 805{
531 int index; 806 u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INT_SUM1);
532 int coreid = irq - OCTEON_IRQ_WDOG0; 807
533 u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0); 808 ciu_sum &= __get_cpu_var(octeon_irq_ciu1_en_mirror);
534 struct irq_desc *desc = irq_to_desc(irq); 809 clear_c0_status(STATUSF_IP3);
535 810 if (likely(ciu_sum)) {
536 if ((desc->status & IRQ_DISABLED) == 0) { 811 int bit = fls64(ciu_sum) - 1;
537 index = coreid * 2 + 1; 812 int irq = octeon_irq_ciu_to_irq[1][bit];
538 cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); 813 if (likely(irq))
814 do_IRQ(irq);
815 else
816 spurious_interrupt();
817 } else {
818 spurious_interrupt();
539 } 819 }
820 set_c0_status(STATUSF_IP3);
540} 821}
541 822
542/* 823static void octeon_irq_ip3_v2(void)
543 * Disable the irq on the current core for chips that have the EN*_W1{S,C}
544 * registers.
545 */
546static void octeon_irq_ciu1_ack_v2(unsigned int irq)
547{ 824{
548 int index = cvmx_get_core_num() * 2 + 1; 825 u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INT_SUM1);
549 u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0); 826
550 827 ciu_sum &= __get_cpu_var(octeon_irq_ciu1_en_mirror);
551 cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); 828 if (likely(ciu_sum)) {
829 int bit = fls64(ciu_sum) - 1;
830 int irq = octeon_irq_ciu_to_irq[1][bit];
831 if (likely(irq))
832 do_IRQ(irq);
833 else
834 spurious_interrupt();
835 } else {
836 spurious_interrupt();
837 }
552} 838}
553 839
554/* 840static void octeon_irq_ip4_mask(void)
555 * Disable the irq on the all cores for chips that have the EN*_W1{S,C}
556 * registers.
557 */
558static void octeon_irq_ciu1_disable_all_v2(unsigned int irq)
559{ 841{
560 u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0); 842 clear_c0_status(STATUSF_IP4);
561 int index; 843 spurious_interrupt();
562 int cpu;
563 for_each_online_cpu(cpu) {
564 index = octeon_coreid_for_cpu(cpu) * 2 + 1;
565 cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
566 }
567} 844}
568 845
569#ifdef CONFIG_SMP 846static void (*octeon_irq_ip2)(void);
570static int octeon_irq_ciu1_set_affinity(unsigned int irq, 847static void (*octeon_irq_ip3)(void);
571 const struct cpumask *dest) 848static void (*octeon_irq_ip4)(void);
572{
573 int cpu;
574 struct irq_desc *desc = irq_to_desc(irq);
575 int enable_one = (desc->status & IRQ_DISABLED) == 0;
576 unsigned long flags;
577 int bit = irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */
578 849
579 /* 850void __cpuinitdata (*octeon_irq_setup_secondary)(void);
580 * For non-v2 CIU, we will allow only single CPU affinity.
581 * This removes the need to do locking in the .ack/.eoi
582 * functions.
583 */
584 if (cpumask_weight(dest) != 1)
585 return -EINVAL;
586 851
587 raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags); 852static void __cpuinit octeon_irq_percpu_enable(void)
588 for_each_online_cpu(cpu) { 853{
589 int coreid = octeon_coreid_for_cpu(cpu); 854 irq_cpu_online();
590 uint64_t en1 = 855}
591 cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)); 856
592 if (cpumask_test_cpu(cpu, dest) && enable_one) { 857static void __cpuinit octeon_irq_init_ciu_percpu(void)
593 enable_one = 0; 858{
594 en1 |= 1ull << bit; 859 int coreid = cvmx_get_core_num();
595 } else {
596 en1 &= ~(1ull << bit);
597 }
598 cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
599 }
600 /* 860 /*
601 * We need to do a read after the last update to make sure all 861 * Disable All CIU Interrupts. The ones we need will be
602 * of them are done. 862 * enabled later. Read the SUM register so we know the write
863 * completed.
603 */ 864 */
604 cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1)); 865 cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), 0);
605 raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags); 866 cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0);
606 867 cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0);
607 return 0; 868 cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0);
869 cvmx_read_csr(CVMX_CIU_INTX_SUM0((coreid * 2)));
608} 870}
609 871
610/* 872static void __cpuinit octeon_irq_setup_secondary_ciu(void)
611 * Set affinity for the irq for chips that have the EN*_W1{S,C}
612 * registers.
613 */
614static int octeon_irq_ciu1_set_affinity_v2(unsigned int irq,
615 const struct cpumask *dest)
616{ 873{
617 int cpu;
618 int index;
619 struct irq_desc *desc = irq_to_desc(irq);
620 int enable_one = (desc->status & IRQ_DISABLED) == 0;
621 u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
622 for_each_online_cpu(cpu) {
623 index = octeon_coreid_for_cpu(cpu) * 2 + 1;
624 if (cpumask_test_cpu(cpu, dest) && enable_one) {
625 enable_one = 0;
626 cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
627 } else {
628 cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
629 }
630 }
631 return 0;
632}
633#endif
634 874
635/* 875 __get_cpu_var(octeon_irq_ciu0_en_mirror) = 0;
636 * Newer octeon chips have support for lockless CIU operation. 876 __get_cpu_var(octeon_irq_ciu1_en_mirror) = 0;
637 */
638static struct irq_chip octeon_irq_chip_ciu1_v2 = {
639 .name = "CIU1",
640 .enable = octeon_irq_ciu1_enable_v2,
641 .disable = octeon_irq_ciu1_disable_all_v2,
642 .eoi = octeon_irq_ciu1_enable_v2,
643#ifdef CONFIG_SMP
644 .set_affinity = octeon_irq_ciu1_set_affinity_v2,
645#endif
646};
647 877
648static struct irq_chip octeon_irq_chip_ciu1 = { 878 octeon_irq_init_ciu_percpu();
649 .name = "CIU1", 879 octeon_irq_percpu_enable();
650 .enable = octeon_irq_ciu1_enable,
651 .disable = octeon_irq_ciu1_disable,
652 .eoi = octeon_irq_ciu1_eoi,
653#ifdef CONFIG_SMP
654 .set_affinity = octeon_irq_ciu1_set_affinity,
655#endif
656};
657 880
658static struct irq_chip octeon_irq_chip_ciu1_wd_v2 = { 881 /* Enable the CIU lines */
659 .name = "CIU1-W", 882 set_c0_status(STATUSF_IP3 | STATUSF_IP2);
660 .enable = octeon_irq_ciu1_wd_enable_v2, 883 clear_c0_status(STATUSF_IP4);
661 .disable = octeon_irq_ciu1_disable_all_v2, 884}
662 .eoi = octeon_irq_ciu1_wd_enable_v2,
663};
664 885
665static struct irq_chip octeon_irq_chip_ciu1_wd = { 886static void __init octeon_irq_init_ciu(void)
666 .name = "CIU1-W", 887{
667 .enable = octeon_irq_ciu1_wd_enable, 888 unsigned int i;
668 .disable = octeon_irq_ciu1_disable, 889 struct irq_chip *chip;
669 .eoi = octeon_irq_ciu1_eoi, 890 struct irq_chip *chip_edge;
670}; 891 struct irq_chip *chip_mbox;
892 struct irq_chip *chip_wd;
893
894 octeon_irq_init_ciu_percpu();
895 octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu;
671 896
672static void (*octeon_ciu0_ack)(unsigned int); 897 if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
673static void (*octeon_ciu1_ack)(unsigned int); 898 OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
899 OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) ||
900 OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
901 octeon_irq_ip2 = octeon_irq_ip2_v2;
902 octeon_irq_ip3 = octeon_irq_ip3_v2;
903 chip = &octeon_irq_chip_ciu_v2;
904 chip_edge = &octeon_irq_chip_ciu_edge_v2;
905 chip_mbox = &octeon_irq_chip_ciu_mbox_v2;
906 chip_wd = &octeon_irq_chip_ciu_wd_v2;
907 } else {
908 octeon_irq_ip2 = octeon_irq_ip2_v1;
909 octeon_irq_ip3 = octeon_irq_ip3_v1;
910 chip = &octeon_irq_chip_ciu;
911 chip_edge = &octeon_irq_chip_ciu_edge;
912 chip_mbox = &octeon_irq_chip_ciu_mbox;
913 chip_wd = &octeon_irq_chip_ciu_wd;
914 }
915 octeon_irq_ip4 = octeon_irq_ip4_mask;
916
917 /* Mips internal */
918 octeon_irq_init_core();
919
920 /* CIU_0 */
921 for (i = 0; i < 16; i++)
922 octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WORKQ0, 0, i + 0, chip, handle_level_irq);
923 for (i = 0; i < 16; i++)
924 octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_GPIO0, 0, i + 16, chip, handle_level_irq);
925
926 octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX0, 0, 32, chip_mbox, handle_percpu_irq);
927 octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX1, 0, 33, chip_mbox, handle_percpu_irq);
928
929 octeon_irq_set_ciu_mapping(OCTEON_IRQ_UART0, 0, 34, chip, handle_level_irq);
930 octeon_irq_set_ciu_mapping(OCTEON_IRQ_UART1, 0, 35, chip, handle_level_irq);
931
932 for (i = 0; i < 4; i++)
933 octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_PCI_INT0, 0, i + 36, chip, handle_level_irq);
934 for (i = 0; i < 4; i++)
935 octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_PCI_MSI0, 0, i + 40, chip, handle_level_irq);
936
937 octeon_irq_set_ciu_mapping(OCTEON_IRQ_TWSI, 0, 45, chip, handle_level_irq);
938 octeon_irq_set_ciu_mapping(OCTEON_IRQ_RML, 0, 46, chip, handle_level_irq);
939 octeon_irq_set_ciu_mapping(OCTEON_IRQ_TRACE0, 0, 47, chip, handle_level_irq);
940
941 for (i = 0; i < 2; i++)
942 octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_GMX_DRP0, 0, i + 48, chip_edge, handle_edge_irq);
943
944 octeon_irq_set_ciu_mapping(OCTEON_IRQ_IPD_DRP, 0, 50, chip_edge, handle_edge_irq);
945 octeon_irq_set_ciu_mapping(OCTEON_IRQ_KEY_ZERO, 0, 51, chip_edge, handle_edge_irq);
946
947 for (i = 0; i < 4; i++)
948 octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_TIMER0, 0, i + 52, chip_edge, handle_edge_irq);
949
950 octeon_irq_set_ciu_mapping(OCTEON_IRQ_USB0, 0, 56, chip, handle_level_irq);
951 octeon_irq_set_ciu_mapping(OCTEON_IRQ_PCM, 0, 57, chip, handle_level_irq);
952 octeon_irq_set_ciu_mapping(OCTEON_IRQ_MPI, 0, 58, chip, handle_level_irq);
953 octeon_irq_set_ciu_mapping(OCTEON_IRQ_TWSI2, 0, 59, chip, handle_level_irq);
954 octeon_irq_set_ciu_mapping(OCTEON_IRQ_POWIQ, 0, 60, chip, handle_level_irq);
955 octeon_irq_set_ciu_mapping(OCTEON_IRQ_IPDPPTHR, 0, 61, chip, handle_level_irq);
956 octeon_irq_set_ciu_mapping(OCTEON_IRQ_MII0, 0, 62, chip, handle_level_irq);
957 octeon_irq_set_ciu_mapping(OCTEON_IRQ_BOOTDMA, 0, 63, chip, handle_level_irq);
958
959 /* CIU_1 */
960 for (i = 0; i < 16; i++)
961 octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i + 0, chip_wd, handle_level_irq);
962
963 octeon_irq_set_ciu_mapping(OCTEON_IRQ_UART2, 1, 16, chip, handle_level_irq);
964 octeon_irq_set_ciu_mapping(OCTEON_IRQ_USB1, 1, 17, chip, handle_level_irq);
965 octeon_irq_set_ciu_mapping(OCTEON_IRQ_MII1, 1, 18, chip, handle_level_irq);
966 octeon_irq_set_ciu_mapping(OCTEON_IRQ_NAND, 1, 19, chip, handle_level_irq);
967 octeon_irq_set_ciu_mapping(OCTEON_IRQ_MIO, 1, 20, chip, handle_level_irq);
968 octeon_irq_set_ciu_mapping(OCTEON_IRQ_IOB, 1, 21, chip, handle_level_irq);
969 octeon_irq_set_ciu_mapping(OCTEON_IRQ_FPA, 1, 22, chip, handle_level_irq);
970 octeon_irq_set_ciu_mapping(OCTEON_IRQ_POW, 1, 23, chip, handle_level_irq);
971 octeon_irq_set_ciu_mapping(OCTEON_IRQ_L2C, 1, 24, chip, handle_level_irq);
972 octeon_irq_set_ciu_mapping(OCTEON_IRQ_IPD, 1, 25, chip, handle_level_irq);
973 octeon_irq_set_ciu_mapping(OCTEON_IRQ_PIP, 1, 26, chip, handle_level_irq);
974 octeon_irq_set_ciu_mapping(OCTEON_IRQ_PKO, 1, 27, chip, handle_level_irq);
975 octeon_irq_set_ciu_mapping(OCTEON_IRQ_ZIP, 1, 28, chip, handle_level_irq);
976 octeon_irq_set_ciu_mapping(OCTEON_IRQ_TIM, 1, 29, chip, handle_level_irq);
977 octeon_irq_set_ciu_mapping(OCTEON_IRQ_RAD, 1, 30, chip, handle_level_irq);
978 octeon_irq_set_ciu_mapping(OCTEON_IRQ_KEY, 1, 31, chip, handle_level_irq);
979 octeon_irq_set_ciu_mapping(OCTEON_IRQ_DFA, 1, 32, chip, handle_level_irq);
980 octeon_irq_set_ciu_mapping(OCTEON_IRQ_USBCTL, 1, 33, chip, handle_level_irq);
981 octeon_irq_set_ciu_mapping(OCTEON_IRQ_SLI, 1, 34, chip, handle_level_irq);
982 octeon_irq_set_ciu_mapping(OCTEON_IRQ_DPI, 1, 35, chip, handle_level_irq);
983
984 octeon_irq_set_ciu_mapping(OCTEON_IRQ_AGX0, 1, 36, chip, handle_level_irq);
985
986 octeon_irq_set_ciu_mapping(OCTEON_IRQ_AGL, 1, 46, chip, handle_level_irq);
987
988 octeon_irq_set_ciu_mapping(OCTEON_IRQ_PTP, 1, 47, chip_edge, handle_edge_irq);
989
990 octeon_irq_set_ciu_mapping(OCTEON_IRQ_PEM0, 1, 48, chip, handle_level_irq);
991 octeon_irq_set_ciu_mapping(OCTEON_IRQ_PEM1, 1, 49, chip, handle_level_irq);
992 octeon_irq_set_ciu_mapping(OCTEON_IRQ_SRIO0, 1, 50, chip, handle_level_irq);
993 octeon_irq_set_ciu_mapping(OCTEON_IRQ_SRIO1, 1, 51, chip, handle_level_irq);
994 octeon_irq_set_ciu_mapping(OCTEON_IRQ_LMC0, 1, 52, chip, handle_level_irq);
995 octeon_irq_set_ciu_mapping(OCTEON_IRQ_DFM, 1, 56, chip, handle_level_irq);
996 octeon_irq_set_ciu_mapping(OCTEON_IRQ_RST, 1, 63, chip, handle_level_irq);
997
998 /* Enable the CIU lines */
999 set_c0_status(STATUSF_IP3 | STATUSF_IP2);
1000 clear_c0_status(STATUSF_IP4);
1001}
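
octeon_irq_init_ciu() now declares the whole interrupt map in one place: each call binds a Linux irq to its CIU line and bit, a chip variant (level, edge, mailbox or watchdog) and a flow handler, and records the reverse mapping used by the demux above. Adding a source becomes a one-liner; a hypothetical level-triggered device on line 1, bit 40 (OCTEON_IRQ_MYDEV is not a real define) would be wired as:

    /* Hypothetical addition; OCTEON_IRQ_MYDEV and the bit position are made up. */
    static void __init my_octeon_add_mydev_irq(struct irq_chip *chip)
    {
            /* CIU line 1, bit 40, level triggered, normal (round-robin capable) chip. */
            octeon_irq_set_ciu_mapping(OCTEON_IRQ_MYDEV, 1, 40, chip, handle_level_irq);
    }
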
674 1002
675void __init arch_init_irq(void) 1003void __init arch_init_irq(void)
676{ 1004{
677 unsigned int irq;
678 struct irq_chip *chip0;
679 struct irq_chip *chip0_mbox;
680 struct irq_chip *chip1;
681 struct irq_chip *chip1_wd;
682
683#ifdef CONFIG_SMP 1005#ifdef CONFIG_SMP
684 /* Set the default affinity to the boot cpu. */ 1006 /* Set the default affinity to the boot cpu. */
685 cpumask_clear(irq_default_affinity); 1007 cpumask_clear(irq_default_affinity);
686 cpumask_set_cpu(smp_processor_id(), irq_default_affinity); 1008 cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
687#endif 1009#endif
688 1010 octeon_irq_init_ciu();
689 if (NR_IRQS < OCTEON_IRQ_LAST)
690 pr_err("octeon_irq_init: NR_IRQS is set too low\n");
691
692 if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
693 OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
694 OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X)) {
695 octeon_ciu0_ack = octeon_irq_ciu0_ack_v2;
696 octeon_ciu1_ack = octeon_irq_ciu1_ack_v2;
697 chip0 = &octeon_irq_chip_ciu0_v2;
698 chip0_mbox = &octeon_irq_chip_ciu0_mbox_v2;
699 chip1 = &octeon_irq_chip_ciu1_v2;
700 chip1_wd = &octeon_irq_chip_ciu1_wd_v2;
701 } else {
702 octeon_ciu0_ack = octeon_irq_ciu0_ack;
703 octeon_ciu1_ack = octeon_irq_ciu1_ack;
704 chip0 = &octeon_irq_chip_ciu0;
705 chip0_mbox = &octeon_irq_chip_ciu0_mbox;
706 chip1 = &octeon_irq_chip_ciu1;
707 chip1_wd = &octeon_irq_chip_ciu1_wd;
708 }
709
710 /* 0 - 15 reserved for i8259 master and slave controller. */
711
712 /* 17 - 23 Mips internal */
713 for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++) {
714 set_irq_chip_and_handler(irq, &octeon_irq_chip_core,
715 handle_percpu_irq);
716 }
717
718 /* 24 - 87 CIU_INT_SUM0 */
719 for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
720 switch (irq) {
721 case OCTEON_IRQ_MBOX0:
722 case OCTEON_IRQ_MBOX1:
723 set_irq_chip_and_handler(irq, chip0_mbox, handle_percpu_irq);
724 break;
725 default:
726 set_irq_chip_and_handler(irq, chip0, handle_fasteoi_irq);
727 break;
728 }
729 }
730
731 /* 88 - 151 CIU_INT_SUM1 */
732 for (irq = OCTEON_IRQ_WDOG0; irq <= OCTEON_IRQ_WDOG15; irq++)
733 set_irq_chip_and_handler(irq, chip1_wd, handle_fasteoi_irq);
734
735 for (irq = OCTEON_IRQ_UART2; irq <= OCTEON_IRQ_RESERVED151; irq++)
736 set_irq_chip_and_handler(irq, chip1, handle_fasteoi_irq);
737
738 set_c0_status(0x300 << 2);
739} 1011}
740 1012
741asmlinkage void plat_irq_dispatch(void) 1013asmlinkage void plat_irq_dispatch(void)
742{ 1014{
743 const unsigned long core_id = cvmx_get_core_num();
744 const uint64_t ciu_sum0_address = CVMX_CIU_INTX_SUM0(core_id * 2);
745 const uint64_t ciu_en0_address = CVMX_CIU_INTX_EN0(core_id * 2);
746 const uint64_t ciu_sum1_address = CVMX_CIU_INT_SUM1;
747 const uint64_t ciu_en1_address = CVMX_CIU_INTX_EN1(core_id * 2 + 1);
748 unsigned long cop0_cause; 1015 unsigned long cop0_cause;
749 unsigned long cop0_status; 1016 unsigned long cop0_status;
750 uint64_t ciu_en;
751 uint64_t ciu_sum;
752 unsigned int irq;
753 1017
754 while (1) { 1018 while (1) {
755 cop0_cause = read_c0_cause(); 1019 cop0_cause = read_c0_cause();
@@ -757,33 +1021,16 @@ asmlinkage void plat_irq_dispatch(void)
757 cop0_cause &= cop0_status; 1021 cop0_cause &= cop0_status;
758 cop0_cause &= ST0_IM; 1022 cop0_cause &= ST0_IM;
759 1023
760 if (unlikely(cop0_cause & STATUSF_IP2)) { 1024 if (unlikely(cop0_cause & STATUSF_IP2))
761 ciu_sum = cvmx_read_csr(ciu_sum0_address); 1025 octeon_irq_ip2();
762 ciu_en = cvmx_read_csr(ciu_en0_address); 1026 else if (unlikely(cop0_cause & STATUSF_IP3))
763 ciu_sum &= ciu_en; 1027 octeon_irq_ip3();
764 if (likely(ciu_sum)) { 1028 else if (unlikely(cop0_cause & STATUSF_IP4))
765 irq = fls64(ciu_sum) + OCTEON_IRQ_WORKQ0 - 1; 1029 octeon_irq_ip4();
766 octeon_ciu0_ack(irq); 1030 else if (likely(cop0_cause))
767 do_IRQ(irq);
768 } else {
769 spurious_interrupt();
770 }
771 } else if (unlikely(cop0_cause & STATUSF_IP3)) {
772 ciu_sum = cvmx_read_csr(ciu_sum1_address);
773 ciu_en = cvmx_read_csr(ciu_en1_address);
774 ciu_sum &= ciu_en;
775 if (likely(ciu_sum)) {
776 irq = fls64(ciu_sum) + OCTEON_IRQ_WDOG0 - 1;
777 octeon_ciu1_ack(irq);
778 do_IRQ(irq);
779 } else {
780 spurious_interrupt();
781 }
782 } else if (likely(cop0_cause)) {
783 do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE); 1031 do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
784 } else { 1032 else
785 break; 1033 break;
786 }
787 } 1034 }
788} 1035}
789 1036
@@ -791,83 +1038,7 @@ asmlinkage void plat_irq_dispatch(void)
791 1038
792void fixup_irqs(void) 1039void fixup_irqs(void)
793{ 1040{
794 int irq; 1041 irq_cpu_offline();
795 struct irq_desc *desc;
796 cpumask_t new_affinity;
797 unsigned long flags;
798 int do_set_affinity;
799 int cpu;
800
801 cpu = smp_processor_id();
802
803 for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++)
804 octeon_irq_core_disable_local(irq);
805
806 for (irq = OCTEON_IRQ_WORKQ0; irq < OCTEON_IRQ_LAST; irq++) {
807 desc = irq_to_desc(irq);
808 switch (irq) {
809 case OCTEON_IRQ_MBOX0:
810 case OCTEON_IRQ_MBOX1:
811 /* The eoi function will disable them on this CPU. */
812 desc->chip->eoi(irq);
813 break;
814 case OCTEON_IRQ_WDOG0:
815 case OCTEON_IRQ_WDOG1:
816 case OCTEON_IRQ_WDOG2:
817 case OCTEON_IRQ_WDOG3:
818 case OCTEON_IRQ_WDOG4:
819 case OCTEON_IRQ_WDOG5:
820 case OCTEON_IRQ_WDOG6:
821 case OCTEON_IRQ_WDOG7:
822 case OCTEON_IRQ_WDOG8:
823 case OCTEON_IRQ_WDOG9:
824 case OCTEON_IRQ_WDOG10:
825 case OCTEON_IRQ_WDOG11:
826 case OCTEON_IRQ_WDOG12:
827 case OCTEON_IRQ_WDOG13:
828 case OCTEON_IRQ_WDOG14:
829 case OCTEON_IRQ_WDOG15:
830 /*
831 * These have special per CPU semantics and
832 * are handled in the watchdog driver.
833 */
834 break;
835 default:
836 raw_spin_lock_irqsave(&desc->lock, flags);
837 /*
838 * If this irq has an action, it is in use and
839 * must be migrated if it has affinity to this
840 * cpu.
841 */
842 if (desc->action && cpumask_test_cpu(cpu, desc->affinity)) {
843 if (cpumask_weight(desc->affinity) > 1) {
844 /*
845 * It has multi CPU affinity,
846 * just remove this CPU from
847 * the affinity set.
848 */
849 cpumask_copy(&new_affinity, desc->affinity);
850 cpumask_clear_cpu(cpu, &new_affinity);
851 } else {
852 /*
853 * Otherwise, put it on lowest
854 * numbered online CPU.
855 */
856 cpumask_clear(&new_affinity);
857 cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
858 }
859 do_set_affinity = 1;
860 } else {
861 do_set_affinity = 0;
862 }
863 raw_spin_unlock_irqrestore(&desc->lock, flags);
864
865 if (do_set_affinity)
866 irq_set_affinity(irq, &new_affinity);
867
868 break;
869 }
870 }
871} 1042}
872 1043
873#endif /* CONFIG_HOTPLUG_CPU */ 1044#endif /* CONFIG_HOTPLUG_CPU */
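The hunks above shrink plat_irq_dispatch() to a cause-register decode and replace the hand-rolled fixup_irqs() migration loop with the generic irq_cpu_offline() helper. A minimal sketch of that offline pattern follows; every name except irq_cpu_offline(), struct irq_chip and struct irq_data is invented for illustration.

#include <linux/irq.h>

/* Per-chip hook: move or quiesce the line on the CPU that is going down. */
static void example_irq_cpu_offline(struct irq_data *data)
{
	/* e.g. re-route the hardware line to another online CPU */
}

static struct irq_chip example_chip = {
	.name			= "example",
	.irq_cpu_offline	= example_irq_cpu_offline,
};

/* CPU hot-unplug path: one call replaces the old per-IRQ switch. */
static void example_fixup_irqs(void)
{
	irq_cpu_offline();	/* walks the descriptors, calls the chip hook */
}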
diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c
index cecaf62aef3..cd61d7281d9 100644
--- a/arch/mips/cavium-octeon/octeon-platform.c
+++ b/arch/mips/cavium-octeon/octeon-platform.c
@@ -75,7 +75,7 @@ static int __init octeon_cf_device_init(void)
75 * zero. 75 * zero.
76 */ 76 */
77 77
78 /* Asume that CS1 immediately follows. */ 78 /* Assume that CS1 immediately follows. */
79 mio_boot_reg_cfg.u64 = 79 mio_boot_reg_cfg.u64 =
80 cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(i + 1)); 80 cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(i + 1));
81 region_base = mio_boot_reg_cfg.s.base << 16; 81 region_base = mio_boot_reg_cfg.s.base << 16;
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index b0c3686c96d..0707fae3f0e 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -420,7 +420,6 @@ void octeon_user_io_init(void)
420void __init prom_init(void) 420void __init prom_init(void)
421{ 421{
422 struct cvmx_sysinfo *sysinfo; 422 struct cvmx_sysinfo *sysinfo;
423 const int coreid = cvmx_get_core_num();
424 int i; 423 int i;
425 int argc; 424 int argc;
426#ifdef CONFIG_CAVIUM_RESERVE32 425#ifdef CONFIG_CAVIUM_RESERVE32
@@ -537,17 +536,6 @@ void __init prom_init(void)
537 536
538 octeon_uart = octeon_get_boot_uart(); 537 octeon_uart = octeon_get_boot_uart();
539 538
540 /*
541 * Disable All CIU Interrupts. The ones we need will be
542 * enabled later. Read the SUM register so we know the write
543 * completed.
544 */
545 cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), 0);
546 cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0);
547 cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0);
548 cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0);
549 cvmx_read_csr(CVMX_CIU_INTX_SUM0((coreid * 2)));
550
551#ifdef CONFIG_SMP 539#ifdef CONFIG_SMP
552 octeon_write_lcd("LinuxSMP"); 540 octeon_write_lcd("LinuxSMP");
553#else 541#else
@@ -674,7 +662,7 @@ void __init plat_mem_setup(void)
674 * some memory vectors. When SPARSEMEM is in use, it doesn't 662 * some memory vectors. When SPARSEMEM is in use, it doesn't
675 * verify that the size is big enough for the final 663 * verify that the size is big enough for the final
 676 * vectors. Making the smallest chunk 4MB seems to be enough 664
677 * to consistantly work. 665 * to consistently work.
678 */ 666 */
679 mem_alloc_size = 4 << 20; 667 mem_alloc_size = 4 << 20;
680 if (mem_alloc_size > MAX_MEMORY) 668 if (mem_alloc_size > MAX_MEMORY)
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index 391cefe556b..ba78b21cc8d 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -171,41 +171,19 @@ static void octeon_boot_secondary(int cpu, struct task_struct *idle)
171 * After we've done initial boot, this function is called to allow the 171 * After we've done initial boot, this function is called to allow the
172 * board code to clean up state, if needed 172 * board code to clean up state, if needed
173 */ 173 */
174static void octeon_init_secondary(void) 174static void __cpuinit octeon_init_secondary(void)
175{ 175{
176 const int coreid = cvmx_get_core_num();
177 union cvmx_ciu_intx_sum0 interrupt_enable;
178 unsigned int sr; 176 unsigned int sr;
179 177
180#ifdef CONFIG_HOTPLUG_CPU
181 struct linux_app_boot_info *labi;
182
183 labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);
184
185 if (labi->labi_signature != LABI_SIGNATURE)
186 panic("The bootloader version on this board is incorrect.");
187#endif
188
189 sr = set_c0_status(ST0_BEV); 178 sr = set_c0_status(ST0_BEV);
190 write_c0_ebase((u32)ebase); 179 write_c0_ebase((u32)ebase);
191 write_c0_status(sr); 180 write_c0_status(sr);
192 181
193 octeon_check_cpu_bist(); 182 octeon_check_cpu_bist();
194 octeon_init_cvmcount(); 183 octeon_init_cvmcount();
195 /* 184
196 pr_info("SMP: CPU%d (CoreId %lu) started\n", cpu, coreid); 185 octeon_irq_setup_secondary();
197 */ 186 raw_local_irq_enable();
198 /* Enable Mailbox interrupts to this core. These are the only
199 interrupts allowed on line 3 */
200 cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), 0xffffffff);
201 interrupt_enable.u64 = 0;
202 interrupt_enable.s.mbox = 0x3;
203 cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), interrupt_enable.u64);
204 cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0);
205 cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0);
206 cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0);
207 /* Enable core interrupt processing for 2,3 and 7 */
208 set_c0_status(0x8c01);
209} 187}
210 188
211/** 189/**
@@ -214,6 +192,15 @@ static void octeon_init_secondary(void)
214 */ 192 */
215void octeon_prepare_cpus(unsigned int max_cpus) 193void octeon_prepare_cpus(unsigned int max_cpus)
216{ 194{
195#ifdef CONFIG_HOTPLUG_CPU
196 struct linux_app_boot_info *labi;
197
198 labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);
199
200 if (labi->labi_signature != LABI_SIGNATURE)
201 panic("The bootloader version on this board is incorrect.");
202#endif
203
217 cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 0xffffffff); 204 cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 0xffffffff);
218 if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt, IRQF_DISABLED, 205 if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt, IRQF_DISABLED,
219 "mailbox0", mailbox_interrupt)) { 206 "mailbox0", mailbox_interrupt)) {
diff --git a/arch/mips/dec/ioasic-irq.c b/arch/mips/dec/ioasic-irq.c
index cb41954fc32..824e08c7379 100644
--- a/arch/mips/dec/ioasic-irq.c
+++ b/arch/mips/dec/ioasic-irq.c
@@ -17,80 +17,48 @@
17#include <asm/dec/ioasic_addrs.h> 17#include <asm/dec/ioasic_addrs.h>
18#include <asm/dec/ioasic_ints.h> 18#include <asm/dec/ioasic_ints.h>
19 19
20
21static int ioasic_irq_base; 20static int ioasic_irq_base;
22 21
23 22static void unmask_ioasic_irq(struct irq_data *d)
24static inline void unmask_ioasic_irq(unsigned int irq)
25{ 23{
26 u32 simr; 24 u32 simr;
27 25
28 simr = ioasic_read(IO_REG_SIMR); 26 simr = ioasic_read(IO_REG_SIMR);
29 simr |= (1 << (irq - ioasic_irq_base)); 27 simr |= (1 << (d->irq - ioasic_irq_base));
30 ioasic_write(IO_REG_SIMR, simr); 28 ioasic_write(IO_REG_SIMR, simr);
31} 29}
32 30
33static inline void mask_ioasic_irq(unsigned int irq) 31static void mask_ioasic_irq(struct irq_data *d)
34{ 32{
35 u32 simr; 33 u32 simr;
36 34
37 simr = ioasic_read(IO_REG_SIMR); 35 simr = ioasic_read(IO_REG_SIMR);
38 simr &= ~(1 << (irq - ioasic_irq_base)); 36 simr &= ~(1 << (d->irq - ioasic_irq_base));
39 ioasic_write(IO_REG_SIMR, simr); 37 ioasic_write(IO_REG_SIMR, simr);
40} 38}
41 39
42static inline void clear_ioasic_irq(unsigned int irq) 40static void ack_ioasic_irq(struct irq_data *d)
43{ 41{
44 u32 sir; 42 mask_ioasic_irq(d);
45
46 sir = ~(1 << (irq - ioasic_irq_base));
47 ioasic_write(IO_REG_SIR, sir);
48}
49
50static inline void ack_ioasic_irq(unsigned int irq)
51{
52 mask_ioasic_irq(irq);
53 fast_iob(); 43 fast_iob();
54} 44}
55 45
56static inline void end_ioasic_irq(unsigned int irq)
57{
58 if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
59 unmask_ioasic_irq(irq);
60}
61
62static struct irq_chip ioasic_irq_type = { 46static struct irq_chip ioasic_irq_type = {
63 .name = "IO-ASIC", 47 .name = "IO-ASIC",
64 .ack = ack_ioasic_irq, 48 .irq_ack = ack_ioasic_irq,
65 .mask = mask_ioasic_irq, 49 .irq_mask = mask_ioasic_irq,
66 .mask_ack = ack_ioasic_irq, 50 .irq_mask_ack = ack_ioasic_irq,
67 .unmask = unmask_ioasic_irq, 51 .irq_unmask = unmask_ioasic_irq,
68}; 52};
69 53
70
71#define unmask_ioasic_dma_irq unmask_ioasic_irq
72
73#define mask_ioasic_dma_irq mask_ioasic_irq
74
75#define ack_ioasic_dma_irq ack_ioasic_irq
76
77static inline void end_ioasic_dma_irq(unsigned int irq)
78{
79 clear_ioasic_irq(irq);
80 fast_iob();
81 end_ioasic_irq(irq);
82}
83
84static struct irq_chip ioasic_dma_irq_type = { 54static struct irq_chip ioasic_dma_irq_type = {
85 .name = "IO-ASIC-DMA", 55 .name = "IO-ASIC-DMA",
86 .ack = ack_ioasic_dma_irq, 56 .irq_ack = ack_ioasic_irq,
87 .mask = mask_ioasic_dma_irq, 57 .irq_mask = mask_ioasic_irq,
88 .mask_ack = ack_ioasic_dma_irq, 58 .irq_mask_ack = ack_ioasic_irq,
89 .unmask = unmask_ioasic_dma_irq, 59 .irq_unmask = unmask_ioasic_irq,
90 .end = end_ioasic_dma_irq,
91}; 60};
92 61
93
94void __init init_ioasic_irqs(int base) 62void __init init_ioasic_irqs(int base)
95{ 63{
96 int i; 64 int i;
@@ -100,10 +68,10 @@ void __init init_ioasic_irqs(int base)
100 fast_iob(); 68 fast_iob();
101 69
102 for (i = base; i < base + IO_INR_DMA; i++) 70 for (i = base; i < base + IO_INR_DMA; i++)
103 set_irq_chip_and_handler(i, &ioasic_irq_type, 71 irq_set_chip_and_handler(i, &ioasic_irq_type,
104 handle_level_irq); 72 handle_level_irq);
105 for (; i < base + IO_IRQ_LINES; i++) 73 for (; i < base + IO_IRQ_LINES; i++)
106 set_irq_chip(i, &ioasic_dma_irq_type); 74 irq_set_chip(i, &ioasic_dma_irq_type);
107 75
108 ioasic_irq_base = base; 76 ioasic_irq_base = base;
109} 77}
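The conversions above all follow one pattern: chip callbacks take struct irq_data instead of a bare IRQ number, recover the line as d->irq minus the block's base, and are registered with irq_set_chip_and_handler(). A minimal sketch of that shape, assuming a single hypothetical memory-mapped mask register; the register layout and all example_* names are illustrative only.

#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>

static void __iomem *example_mask_reg;	/* hypothetical mask register */
static int example_irq_base;

static void example_irq_unmask(struct irq_data *d)
{
	u32 m = readl(example_mask_reg);

	writel(m | BIT(d->irq - example_irq_base), example_mask_reg);
}

static void example_irq_mask(struct irq_data *d)
{
	u32 m = readl(example_mask_reg);

	writel(m & ~BIT(d->irq - example_irq_base), example_mask_reg);
}

static struct irq_chip example_irq_chip = {
	.name		= "example",
	.irq_mask	= example_irq_mask,
	.irq_unmask	= example_irq_unmask,
};

static void __init example_init_irqs(int base, int count)
{
	int i;

	for (i = base; i < base + count; i++)
		irq_set_chip_and_handler(i, &example_irq_chip,
					 handle_level_irq);
	example_irq_base = base;
}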
diff --git a/arch/mips/dec/kn02-irq.c b/arch/mips/dec/kn02-irq.c
index ed90a8deabc..37199f742c4 100644
--- a/arch/mips/dec/kn02-irq.c
+++ b/arch/mips/dec/kn02-irq.c
@@ -27,43 +27,40 @@
27 */ 27 */
28u32 cached_kn02_csr; 28u32 cached_kn02_csr;
29 29
30
31static int kn02_irq_base; 30static int kn02_irq_base;
32 31
33 32static void unmask_kn02_irq(struct irq_data *d)
34static inline void unmask_kn02_irq(unsigned int irq)
35{ 33{
36 volatile u32 *csr = (volatile u32 *)CKSEG1ADDR(KN02_SLOT_BASE + 34 volatile u32 *csr = (volatile u32 *)CKSEG1ADDR(KN02_SLOT_BASE +
37 KN02_CSR); 35 KN02_CSR);
38 36
39 cached_kn02_csr |= (1 << (irq - kn02_irq_base + 16)); 37 cached_kn02_csr |= (1 << (d->irq - kn02_irq_base + 16));
40 *csr = cached_kn02_csr; 38 *csr = cached_kn02_csr;
41} 39}
42 40
43static inline void mask_kn02_irq(unsigned int irq) 41static void mask_kn02_irq(struct irq_data *d)
44{ 42{
45 volatile u32 *csr = (volatile u32 *)CKSEG1ADDR(KN02_SLOT_BASE + 43 volatile u32 *csr = (volatile u32 *)CKSEG1ADDR(KN02_SLOT_BASE +
46 KN02_CSR); 44 KN02_CSR);
47 45
48 cached_kn02_csr &= ~(1 << (irq - kn02_irq_base + 16)); 46 cached_kn02_csr &= ~(1 << (d->irq - kn02_irq_base + 16));
49 *csr = cached_kn02_csr; 47 *csr = cached_kn02_csr;
50} 48}
51 49
52static void ack_kn02_irq(unsigned int irq) 50static void ack_kn02_irq(struct irq_data *d)
53{ 51{
54 mask_kn02_irq(irq); 52 mask_kn02_irq(d);
55 iob(); 53 iob();
56} 54}
57 55
58static struct irq_chip kn02_irq_type = { 56static struct irq_chip kn02_irq_type = {
59 .name = "KN02-CSR", 57 .name = "KN02-CSR",
60 .ack = ack_kn02_irq, 58 .irq_ack = ack_kn02_irq,
61 .mask = mask_kn02_irq, 59 .irq_mask = mask_kn02_irq,
62 .mask_ack = ack_kn02_irq, 60 .irq_mask_ack = ack_kn02_irq,
63 .unmask = unmask_kn02_irq, 61 .irq_unmask = unmask_kn02_irq,
64}; 62};
65 63
66
67void __init init_kn02_irqs(int base) 64void __init init_kn02_irqs(int base)
68{ 65{
69 volatile u32 *csr = (volatile u32 *)CKSEG1ADDR(KN02_SLOT_BASE + 66 volatile u32 *csr = (volatile u32 *)CKSEG1ADDR(KN02_SLOT_BASE +
@@ -76,7 +73,7 @@ void __init init_kn02_irqs(int base)
76 iob(); 73 iob();
77 74
78 for (i = base; i < base + KN02_IRQ_LINES; i++) 75 for (i = base; i < base + KN02_IRQ_LINES; i++)
79 set_irq_chip_and_handler(i, &kn02_irq_type, handle_level_irq); 76 irq_set_chip_and_handler(i, &kn02_irq_type, handle_level_irq);
80 77
81 kn02_irq_base = base; 78 kn02_irq_base = base;
82} 79}
diff --git a/arch/mips/emma/markeins/irq.c b/arch/mips/emma/markeins/irq.c
index 3a96799eb65..3dbd7a5a6ad 100644
--- a/arch/mips/emma/markeins/irq.c
+++ b/arch/mips/emma/markeins/irq.c
@@ -34,13 +34,10 @@
34 34
35#include <asm/emma/emma2rh.h> 35#include <asm/emma/emma2rh.h>
36 36
37static void emma2rh_irq_enable(unsigned int irq) 37static void emma2rh_irq_enable(struct irq_data *d)
38{ 38{
39 u32 reg_value; 39 unsigned int irq = d->irq - EMMA2RH_IRQ_BASE;
40 u32 reg_bitmask; 40 u32 reg_value, reg_bitmask, reg_index;
41 u32 reg_index;
42
43 irq -= EMMA2RH_IRQ_BASE;
44 41
45 reg_index = EMMA2RH_BHIF_INT_EN_0 + 42 reg_index = EMMA2RH_BHIF_INT_EN_0 +
46 (EMMA2RH_BHIF_INT_EN_1 - EMMA2RH_BHIF_INT_EN_0) * (irq / 32); 43 (EMMA2RH_BHIF_INT_EN_1 - EMMA2RH_BHIF_INT_EN_0) * (irq / 32);
@@ -49,13 +46,10 @@ static void emma2rh_irq_enable(unsigned int irq)
49 emma2rh_out32(reg_index, reg_value | reg_bitmask); 46 emma2rh_out32(reg_index, reg_value | reg_bitmask);
50} 47}
51 48
52static void emma2rh_irq_disable(unsigned int irq) 49static void emma2rh_irq_disable(struct irq_data *d)
53{ 50{
54 u32 reg_value; 51 unsigned int irq = d->irq - EMMA2RH_IRQ_BASE;
55 u32 reg_bitmask; 52 u32 reg_value, reg_bitmask, reg_index;
56 u32 reg_index;
57
58 irq -= EMMA2RH_IRQ_BASE;
59 53
60 reg_index = EMMA2RH_BHIF_INT_EN_0 + 54 reg_index = EMMA2RH_BHIF_INT_EN_0 +
61 (EMMA2RH_BHIF_INT_EN_1 - EMMA2RH_BHIF_INT_EN_0) * (irq / 32); 55 (EMMA2RH_BHIF_INT_EN_1 - EMMA2RH_BHIF_INT_EN_0) * (irq / 32);
@@ -66,10 +60,8 @@ static void emma2rh_irq_disable(unsigned int irq)
66 60
67struct irq_chip emma2rh_irq_controller = { 61struct irq_chip emma2rh_irq_controller = {
68 .name = "emma2rh_irq", 62 .name = "emma2rh_irq",
69 .ack = emma2rh_irq_disable, 63 .irq_mask = emma2rh_irq_disable,
70 .mask = emma2rh_irq_disable, 64 .irq_unmask = emma2rh_irq_enable,
71 .mask_ack = emma2rh_irq_disable,
72 .unmask = emma2rh_irq_enable,
73}; 65};
74 66
75void emma2rh_irq_init(void) 67void emma2rh_irq_init(void)
@@ -77,28 +69,26 @@ void emma2rh_irq_init(void)
77 u32 i; 69 u32 i;
78 70
79 for (i = 0; i < NUM_EMMA2RH_IRQ; i++) 71 for (i = 0; i < NUM_EMMA2RH_IRQ; i++)
80 set_irq_chip_and_handler_name(EMMA2RH_IRQ_BASE + i, 72 irq_set_chip_and_handler_name(EMMA2RH_IRQ_BASE + i,
81 &emma2rh_irq_controller, 73 &emma2rh_irq_controller,
82 handle_level_irq, "level"); 74 handle_level_irq, "level");
83} 75}
84 76
85static void emma2rh_sw_irq_enable(unsigned int irq) 77static void emma2rh_sw_irq_enable(struct irq_data *d)
86{ 78{
79 unsigned int irq = d->irq - EMMA2RH_SW_IRQ_BASE;
87 u32 reg; 80 u32 reg;
88 81
89 irq -= EMMA2RH_SW_IRQ_BASE;
90
91 reg = emma2rh_in32(EMMA2RH_BHIF_SW_INT_EN); 82 reg = emma2rh_in32(EMMA2RH_BHIF_SW_INT_EN);
92 reg |= 1 << irq; 83 reg |= 1 << irq;
93 emma2rh_out32(EMMA2RH_BHIF_SW_INT_EN, reg); 84 emma2rh_out32(EMMA2RH_BHIF_SW_INT_EN, reg);
94} 85}
95 86
96static void emma2rh_sw_irq_disable(unsigned int irq) 87static void emma2rh_sw_irq_disable(struct irq_data *d)
97{ 88{
89 unsigned int irq = d->irq - EMMA2RH_SW_IRQ_BASE;
98 u32 reg; 90 u32 reg;
99 91
100 irq -= EMMA2RH_SW_IRQ_BASE;
101
102 reg = emma2rh_in32(EMMA2RH_BHIF_SW_INT_EN); 92 reg = emma2rh_in32(EMMA2RH_BHIF_SW_INT_EN);
103 reg &= ~(1 << irq); 93 reg &= ~(1 << irq);
104 emma2rh_out32(EMMA2RH_BHIF_SW_INT_EN, reg); 94 emma2rh_out32(EMMA2RH_BHIF_SW_INT_EN, reg);
@@ -106,10 +96,8 @@ static void emma2rh_sw_irq_disable(unsigned int irq)
106 96
107struct irq_chip emma2rh_sw_irq_controller = { 97struct irq_chip emma2rh_sw_irq_controller = {
108 .name = "emma2rh_sw_irq", 98 .name = "emma2rh_sw_irq",
109 .ack = emma2rh_sw_irq_disable, 99 .irq_mask = emma2rh_sw_irq_disable,
110 .mask = emma2rh_sw_irq_disable, 100 .irq_unmask = emma2rh_sw_irq_enable,
111 .mask_ack = emma2rh_sw_irq_disable,
112 .unmask = emma2rh_sw_irq_enable,
113}; 101};
114 102
115void emma2rh_sw_irq_init(void) 103void emma2rh_sw_irq_init(void)
@@ -117,44 +105,43 @@ void emma2rh_sw_irq_init(void)
117 u32 i; 105 u32 i;
118 106
119 for (i = 0; i < NUM_EMMA2RH_IRQ_SW; i++) 107 for (i = 0; i < NUM_EMMA2RH_IRQ_SW; i++)
120 set_irq_chip_and_handler_name(EMMA2RH_SW_IRQ_BASE + i, 108 irq_set_chip_and_handler_name(EMMA2RH_SW_IRQ_BASE + i,
121 &emma2rh_sw_irq_controller, 109 &emma2rh_sw_irq_controller,
122 handle_level_irq, "level"); 110 handle_level_irq, "level");
123} 111}
124 112
125static void emma2rh_gpio_irq_enable(unsigned int irq) 113static void emma2rh_gpio_irq_enable(struct irq_data *d)
126{ 114{
115 unsigned int irq = d->irq - EMMA2RH_GPIO_IRQ_BASE;
127 u32 reg; 116 u32 reg;
128 117
129 irq -= EMMA2RH_GPIO_IRQ_BASE;
130
131 reg = emma2rh_in32(EMMA2RH_GPIO_INT_MASK); 118 reg = emma2rh_in32(EMMA2RH_GPIO_INT_MASK);
132 reg |= 1 << irq; 119 reg |= 1 << irq;
133 emma2rh_out32(EMMA2RH_GPIO_INT_MASK, reg); 120 emma2rh_out32(EMMA2RH_GPIO_INT_MASK, reg);
134} 121}
135 122
136static void emma2rh_gpio_irq_disable(unsigned int irq) 123static void emma2rh_gpio_irq_disable(struct irq_data *d)
137{ 124{
125 unsigned int irq = d->irq - EMMA2RH_GPIO_IRQ_BASE;
138 u32 reg; 126 u32 reg;
139 127
140 irq -= EMMA2RH_GPIO_IRQ_BASE;
141
142 reg = emma2rh_in32(EMMA2RH_GPIO_INT_MASK); 128 reg = emma2rh_in32(EMMA2RH_GPIO_INT_MASK);
143 reg &= ~(1 << irq); 129 reg &= ~(1 << irq);
144 emma2rh_out32(EMMA2RH_GPIO_INT_MASK, reg); 130 emma2rh_out32(EMMA2RH_GPIO_INT_MASK, reg);
145} 131}
146 132
147static void emma2rh_gpio_irq_ack(unsigned int irq) 133static void emma2rh_gpio_irq_ack(struct irq_data *d)
148{ 134{
149 irq -= EMMA2RH_GPIO_IRQ_BASE; 135 unsigned int irq = d->irq - EMMA2RH_GPIO_IRQ_BASE;
136
150 emma2rh_out32(EMMA2RH_GPIO_INT_ST, ~(1 << irq)); 137 emma2rh_out32(EMMA2RH_GPIO_INT_ST, ~(1 << irq));
151} 138}
152 139
153static void emma2rh_gpio_irq_mask_ack(unsigned int irq) 140static void emma2rh_gpio_irq_mask_ack(struct irq_data *d)
154{ 141{
142 unsigned int irq = d->irq - EMMA2RH_GPIO_IRQ_BASE;
155 u32 reg; 143 u32 reg;
156 144
157 irq -= EMMA2RH_GPIO_IRQ_BASE;
158 emma2rh_out32(EMMA2RH_GPIO_INT_ST, ~(1 << irq)); 145 emma2rh_out32(EMMA2RH_GPIO_INT_ST, ~(1 << irq));
159 146
160 reg = emma2rh_in32(EMMA2RH_GPIO_INT_MASK); 147 reg = emma2rh_in32(EMMA2RH_GPIO_INT_MASK);
@@ -164,10 +151,10 @@ static void emma2rh_gpio_irq_mask_ack(unsigned int irq)
164 151
165struct irq_chip emma2rh_gpio_irq_controller = { 152struct irq_chip emma2rh_gpio_irq_controller = {
166 .name = "emma2rh_gpio_irq", 153 .name = "emma2rh_gpio_irq",
167 .ack = emma2rh_gpio_irq_ack, 154 .irq_ack = emma2rh_gpio_irq_ack,
168 .mask = emma2rh_gpio_irq_disable, 155 .irq_mask = emma2rh_gpio_irq_disable,
169 .mask_ack = emma2rh_gpio_irq_mask_ack, 156 .irq_mask_ack = emma2rh_gpio_irq_mask_ack,
170 .unmask = emma2rh_gpio_irq_enable, 157 .irq_unmask = emma2rh_gpio_irq_enable,
171}; 158};
172 159
173void emma2rh_gpio_irq_init(void) 160void emma2rh_gpio_irq_init(void)
@@ -175,7 +162,7 @@ void emma2rh_gpio_irq_init(void)
175 u32 i; 162 u32 i;
176 163
177 for (i = 0; i < NUM_EMMA2RH_IRQ_GPIO; i++) 164 for (i = 0; i < NUM_EMMA2RH_IRQ_GPIO; i++)
178 set_irq_chip_and_handler_name(EMMA2RH_GPIO_IRQ_BASE + i, 165 irq_set_chip_and_handler_name(EMMA2RH_GPIO_IRQ_BASE + i,
179 &emma2rh_gpio_irq_controller, 166 &emma2rh_gpio_irq_controller,
180 handle_edge_irq, "edge"); 167 handle_edge_irq, "edge");
181} 168}
diff --git a/arch/mips/fw/arc/Makefile b/arch/mips/fw/arc/Makefile
index e0aaad482b0..5314b37aff2 100644
--- a/arch/mips/fw/arc/Makefile
+++ b/arch/mips/fw/arc/Makefile
@@ -9,4 +9,4 @@ lib-$(CONFIG_ARC_MEMORY) += memory.o
9lib-$(CONFIG_ARC_CONSOLE) += arc_con.o 9lib-$(CONFIG_ARC_CONSOLE) += arc_con.o
10lib-$(CONFIG_ARC_PROMLIB) += promlib.o 10lib-$(CONFIG_ARC_PROMLIB) += promlib.o
11 11
12EXTRA_CFLAGS += -Werror 12ccflags-y := -Werror
diff --git a/arch/mips/fw/arc/promlib.c b/arch/mips/fw/arc/promlib.c
index c508c00dbb6..b7f9dd3c93c 100644
--- a/arch/mips/fw/arc/promlib.c
+++ b/arch/mips/fw/arc/promlib.c
@@ -4,7 +4,7 @@
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (C) 1996 David S. Miller (dm@sgi.com) 6 * Copyright (C) 1996 David S. Miller (dm@sgi.com)
7 * Compability with board caches, Ulf Carlsson 7 * Compatibility with board caches, Ulf Carlsson
8 */ 8 */
9#include <linux/kernel.h> 9#include <linux/kernel.h>
10#include <asm/sgialib.h> 10#include <asm/sgialib.h>
diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
index 50b4ef288c5..2e1ad4c652b 100644
--- a/arch/mips/include/asm/bitops.h
+++ b/arch/mips/include/asm/bitops.h
@@ -676,9 +676,8 @@ static inline int ffs(int word)
676#include <asm/arch_hweight.h> 676#include <asm/arch_hweight.h>
677#include <asm-generic/bitops/const_hweight.h> 677#include <asm-generic/bitops/const_hweight.h>
678 678
679#include <asm-generic/bitops/ext2-non-atomic.h> 679#include <asm-generic/bitops/le.h>
680#include <asm-generic/bitops/ext2-atomic.h> 680#include <asm-generic/bitops/ext2-atomic.h>
681#include <asm-generic/bitops/minix.h>
682 681
683#endif /* __KERNEL__ */ 682#endif /* __KERNEL__ */
684 683
diff --git a/arch/mips/include/asm/dec/prom.h b/arch/mips/include/asm/dec/prom.h
index b9c8203688d..c0ead631384 100644
--- a/arch/mips/include/asm/dec/prom.h
+++ b/arch/mips/include/asm/dec/prom.h
@@ -108,7 +108,7 @@ extern int (*__pmax_close)(int);
108 108
109/* 109/*
110 * On MIPS64 we have to call PROM functions via a helper 110 * On MIPS64 we have to call PROM functions via a helper
111 * dispatcher to accomodate ABI incompatibilities. 111 * dispatcher to accommodate ABI incompatibilities.
112 */ 112 */
113#define __DEC_PROM_O32(fun, arg) fun arg __asm__(#fun); \ 113#define __DEC_PROM_O32(fun, arg) fun arg __asm__(#fun); \
114 __asm__(#fun " = call_o32") 114 __asm__(#fun " = call_o32")
diff --git a/arch/mips/include/asm/errno.h b/arch/mips/include/asm/errno.h
index a0efc73819e..6dcd3583ed0 100644
--- a/arch/mips/include/asm/errno.h
+++ b/arch/mips/include/asm/errno.h
@@ -121,6 +121,8 @@
121 121
122#define ERFKILL 167 /* Operation not possible due to RF-kill */ 122#define ERFKILL 167 /* Operation not possible due to RF-kill */
123 123
124#define EHWPOISON 168 /* Memory page has hardware error */
125
124#define EDQUOT 1133 /* Quota exceeded */ 126#define EDQUOT 1133 /* Quota exceeded */
125 127
126#ifdef __KERNEL__ 128#ifdef __KERNEL__
diff --git a/arch/mips/include/asm/floppy.h b/arch/mips/include/asm/floppy.h
index 992d232adc8..c5c7c0e6064 100644
--- a/arch/mips/include/asm/floppy.h
+++ b/arch/mips/include/asm/floppy.h
@@ -24,7 +24,7 @@ static inline void fd_cacheflush(char * addr, long size)
24 * And on Mips's the CMOS info fails also ... 24 * And on Mips's the CMOS info fails also ...
25 * 25 *
26 * FIXME: This information should come from the ARC configuration tree 26 * FIXME: This information should come from the ARC configuration tree
27 * or whereever a particular machine has stored this ... 27 * or wherever a particular machine has stored this ...
28 */ 28 */
29#define FLOPPY0_TYPE fd_drive_type(0) 29#define FLOPPY0_TYPE fd_drive_type(0)
30#define FLOPPY1_TYPE fd_drive_type(1) 30#define FLOPPY1_TYPE fd_drive_type(1)
diff --git a/arch/mips/include/asm/futex.h b/arch/mips/include/asm/futex.h
index b9cce90346c..6ebf1734b41 100644
--- a/arch/mips/include/asm/futex.h
+++ b/arch/mips/include/asm/futex.h
@@ -75,7 +75,7 @@
75} 75}
76 76
77static inline int 77static inline int
78futex_atomic_op_inuser(int encoded_op, int __user *uaddr) 78futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
79{ 79{
80 int op = (encoded_op >> 28) & 7; 80 int op = (encoded_op >> 28) & 7;
81 int cmp = (encoded_op >> 24) & 15; 81 int cmp = (encoded_op >> 24) & 15;
@@ -85,7 +85,7 @@ futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
85 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) 85 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
86 oparg = 1 << oparg; 86 oparg = 1 << oparg;
87 87
88 if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) 88 if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
89 return -EFAULT; 89 return -EFAULT;
90 90
91 pagefault_disable(); 91 pagefault_disable();
@@ -132,11 +132,13 @@ futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
132} 132}
133 133
134static inline int 134static inline int
135futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) 135futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
136 u32 oldval, u32 newval)
136{ 137{
137 int retval; 138 int ret = 0;
139 u32 val;
138 140
139 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) 141 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
140 return -EFAULT; 142 return -EFAULT;
141 143
142 if (cpu_has_llsc && R10000_LLSC_WAR) { 144 if (cpu_has_llsc && R10000_LLSC_WAR) {
@@ -145,25 +147,25 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
145 " .set push \n" 147 " .set push \n"
146 " .set noat \n" 148 " .set noat \n"
147 " .set mips3 \n" 149 " .set mips3 \n"
148 "1: ll %0, %2 \n" 150 "1: ll %1, %3 \n"
149 " bne %0, %z3, 3f \n" 151 " bne %1, %z4, 3f \n"
150 " .set mips0 \n" 152 " .set mips0 \n"
151 " move $1, %z4 \n" 153 " move $1, %z5 \n"
152 " .set mips3 \n" 154 " .set mips3 \n"
153 "2: sc $1, %1 \n" 155 "2: sc $1, %2 \n"
154 " beqzl $1, 1b \n" 156 " beqzl $1, 1b \n"
155 __WEAK_LLSC_MB 157 __WEAK_LLSC_MB
156 "3: \n" 158 "3: \n"
157 " .set pop \n" 159 " .set pop \n"
158 " .section .fixup,\"ax\" \n" 160 " .section .fixup,\"ax\" \n"
159 "4: li %0, %5 \n" 161 "4: li %0, %6 \n"
160 " j 3b \n" 162 " j 3b \n"
161 " .previous \n" 163 " .previous \n"
162 " .section __ex_table,\"a\" \n" 164 " .section __ex_table,\"a\" \n"
163 " "__UA_ADDR "\t1b, 4b \n" 165 " "__UA_ADDR "\t1b, 4b \n"
164 " "__UA_ADDR "\t2b, 4b \n" 166 " "__UA_ADDR "\t2b, 4b \n"
165 " .previous \n" 167 " .previous \n"
166 : "=&r" (retval), "=R" (*uaddr) 168 : "+r" (ret), "=&r" (val), "=R" (*uaddr)
167 : "R" (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT) 169 : "R" (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT)
168 : "memory"); 170 : "memory");
169 } else if (cpu_has_llsc) { 171 } else if (cpu_has_llsc) {
@@ -172,31 +174,32 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
172 " .set push \n" 174 " .set push \n"
173 " .set noat \n" 175 " .set noat \n"
174 " .set mips3 \n" 176 " .set mips3 \n"
175 "1: ll %0, %2 \n" 177 "1: ll %1, %3 \n"
176 " bne %0, %z3, 3f \n" 178 " bne %1, %z4, 3f \n"
177 " .set mips0 \n" 179 " .set mips0 \n"
178 " move $1, %z4 \n" 180 " move $1, %z5 \n"
179 " .set mips3 \n" 181 " .set mips3 \n"
180 "2: sc $1, %1 \n" 182 "2: sc $1, %2 \n"
181 " beqz $1, 1b \n" 183 " beqz $1, 1b \n"
182 __WEAK_LLSC_MB 184 __WEAK_LLSC_MB
183 "3: \n" 185 "3: \n"
184 " .set pop \n" 186 " .set pop \n"
185 " .section .fixup,\"ax\" \n" 187 " .section .fixup,\"ax\" \n"
186 "4: li %0, %5 \n" 188 "4: li %0, %6 \n"
187 " j 3b \n" 189 " j 3b \n"
188 " .previous \n" 190 " .previous \n"
189 " .section __ex_table,\"a\" \n" 191 " .section __ex_table,\"a\" \n"
190 " "__UA_ADDR "\t1b, 4b \n" 192 " "__UA_ADDR "\t1b, 4b \n"
191 " "__UA_ADDR "\t2b, 4b \n" 193 " "__UA_ADDR "\t2b, 4b \n"
192 " .previous \n" 194 " .previous \n"
193 : "=&r" (retval), "=R" (*uaddr) 195 : "+r" (ret), "=&r" (val), "=R" (*uaddr)
194 : "R" (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT) 196 : "R" (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT)
195 : "memory"); 197 : "memory");
196 } else 198 } else
197 return -ENOSYS; 199 return -ENOSYS;
198 200
199 return retval; 201 *uval = val;
202 return ret;
200} 203}
201 204
202#endif 205#endif
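futex_atomic_cmpxchg_inatomic() now returns only a status code and reports the value it actually observed through a separate u32 pointer. A sketch of how a caller separates the two, assuming uaddr has already passed access_ok() elsewhere; the example_ helper is invented for illustration.

#include <linux/errno.h>
#include <linux/types.h>
#include <asm/futex.h>

static int example_cmpxchg_user(u32 __user *uaddr, u32 expected, u32 desired)
{
	u32 seen;
	int ret;

	ret = futex_atomic_cmpxchg_inatomic(&seen, uaddr, expected, desired);
	if (ret)
		return ret;			/* -EFAULT or -ENOSYS */

	return seen == expected ? 0 : -EAGAIN;	/* somebody else won the race */
}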
diff --git a/arch/mips/include/asm/hw_irq.h b/arch/mips/include/asm/hw_irq.h
index aca05a43a97..77adda297ad 100644
--- a/arch/mips/include/asm/hw_irq.h
+++ b/arch/mips/include/asm/hw_irq.h
@@ -13,7 +13,7 @@
13extern atomic_t irq_err_count; 13extern atomic_t irq_err_count;
14 14
15/* 15/*
16 * interrupt-retrigger: NOP for now. This may not be apropriate for all 16 * interrupt-retrigger: NOP for now. This may not be appropriate for all
17 * machines, we'll see ... 17 * machines, we'll see ...
18 */ 18 */
19 19
diff --git a/arch/mips/include/asm/i8253.h b/arch/mips/include/asm/i8253.h
index 48bb8237299..9ad011366f7 100644
--- a/arch/mips/include/asm/i8253.h
+++ b/arch/mips/include/asm/i8253.h
@@ -12,8 +12,13 @@
12#define PIT_CH0 0x40 12#define PIT_CH0 0x40
13#define PIT_CH2 0x42 13#define PIT_CH2 0x42
14 14
15#define PIT_LATCH LATCH
16
15extern raw_spinlock_t i8253_lock; 17extern raw_spinlock_t i8253_lock;
16 18
17extern void setup_pit_timer(void); 19extern void setup_pit_timer(void);
18 20
21#define inb_pit inb_p
22#define outb_pit outb_p
23
19#endif /* __ASM_I8253_H */ 24#endif /* __ASM_I8253_H */
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index 5b017f23e24..b04e4de5dd2 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -242,7 +242,7 @@ static inline void __iomem * __ioremap_mode(phys_t offset, unsigned long size,
242 * This version of ioremap ensures that the memory is marked uncachable 242 * This version of ioremap ensures that the memory is marked uncachable
243 * on the CPU as well as honouring existing caching rules from things like 243 * on the CPU as well as honouring existing caching rules from things like
244 * the PCI bus. Note that there are other caches and buffers on many 244 * the PCI bus. Note that there are other caches and buffers on many
245 * busses. In paticular driver authors should read up on PCI writes 245 * busses. In particular driver authors should read up on PCI writes
246 * 246 *
247 * It's useful if some control registers are in such an area and 247 * It's useful if some control registers are in such an area and
248 * write combining or read caching is not desirable: 248 * write combining or read caching is not desirable:
diff --git a/arch/mips/include/asm/ioctls.h b/arch/mips/include/asm/ioctls.h
index d967b899762..92403c3d600 100644
--- a/arch/mips/include/asm/ioctls.h
+++ b/arch/mips/include/asm/ioctls.h
@@ -85,6 +85,7 @@
85#define TIOCSPTLCK _IOW('T', 0x31, int) /* Lock/unlock Pty */ 85#define TIOCSPTLCK _IOW('T', 0x31, int) /* Lock/unlock Pty */
86#define TIOCGDEV _IOR('T', 0x32, unsigned int) /* Get primary device node of /dev/console */ 86#define TIOCGDEV _IOR('T', 0x32, unsigned int) /* Get primary device node of /dev/console */
87#define TIOCSIG _IOW('T', 0x36, int) /* Generate signal on Pty slave */ 87#define TIOCSIG _IOW('T', 0x36, int) /* Generate signal on Pty slave */
88#define TIOCVHANGUP 0x5437
88 89
89/* I hope the range from 0x5480 on is free ... */ 90/* I hope the range from 0x5480 on is free ... */
90#define TIOCSCTTY 0x5480 /* become controlling tty */ 91#define TIOCSCTTY 0x5480 /* become controlling tty */
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
index b003ed52ed1..0ec01294b06 100644
--- a/arch/mips/include/asm/irq.h
+++ b/arch/mips/include/asm/irq.h
@@ -55,9 +55,9 @@ static inline void smtc_im_ack_irq(unsigned int irq)
55#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF 55#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
56#include <linux/cpumask.h> 56#include <linux/cpumask.h>
57 57
58extern int plat_set_irq_affinity(unsigned int irq, 58extern int plat_set_irq_affinity(struct irq_data *d,
59 const struct cpumask *affinity); 59 const struct cpumask *affinity, bool force);
60extern void smtc_forward_irq(unsigned int irq); 60extern void smtc_forward_irq(struct irq_data *d);
61 61
62/* 62/*
63 * IRQ affinity hook invoked at the beginning of interrupt dispatch 63 * IRQ affinity hook invoked at the beginning of interrupt dispatch
@@ -70,51 +70,53 @@ extern void smtc_forward_irq(unsigned int irq);
70 * cpumask implementations, this version is optimistically assuming 70 * cpumask implementations, this version is optimistically assuming
71 * that cpumask.h macro overhead is reasonable during interrupt dispatch. 71 * that cpumask.h macro overhead is reasonable during interrupt dispatch.
72 */ 72 */
73#define IRQ_AFFINITY_HOOK(irq) \ 73static inline int handle_on_other_cpu(unsigned int irq)
74do { \ 74{
75 if (!cpumask_test_cpu(smp_processor_id(), irq_desc[irq].affinity)) {\ 75 struct irq_data *d = irq_get_irq_data(irq);
76 smtc_forward_irq(irq); \ 76
77 irq_exit(); \ 77 if (cpumask_test_cpu(smp_processor_id(), d->affinity))
78 return; \ 78 return 0;
79 } \ 79 smtc_forward_irq(d);
80} while (0) 80 return 1;
81}
81 82
82#else /* Not doing SMTC affinity */ 83#else /* Not doing SMTC affinity */
83 84
84#define IRQ_AFFINITY_HOOK(irq) do { } while (0) 85static inline int handle_on_other_cpu(unsigned int irq) { return 0; }
85 86
86#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */ 87#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
87 88
88#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP 89#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
89 90
91static inline void smtc_im_backstop(unsigned int irq)
92{
93 if (irq_hwmask[irq] & 0x0000ff00)
94 write_c0_tccontext(read_c0_tccontext() &
95 ~(irq_hwmask[irq] & 0x0000ff00));
96}
97
90/* 98/*
91 * Clear interrupt mask handling "backstop" if irq_hwmask 99 * Clear interrupt mask handling "backstop" if irq_hwmask
92 * entry so indicates. This implies that the ack() or end() 100 * entry so indicates. This implies that the ack() or end()
93 * functions will take over re-enabling the low-level mask. 101 * functions will take over re-enabling the low-level mask.
94 * Otherwise it will be done on return from exception. 102 * Otherwise it will be done on return from exception.
95 */ 103 */
96#define __DO_IRQ_SMTC_HOOK(irq) \ 104static inline int smtc_handle_on_other_cpu(unsigned int irq)
97do { \ 105{
98 IRQ_AFFINITY_HOOK(irq); \ 106 int ret = handle_on_other_cpu(irq);
99 if (irq_hwmask[irq] & 0x0000ff00) \ 107
100 write_c0_tccontext(read_c0_tccontext() & \ 108 if (!ret)
101 ~(irq_hwmask[irq] & 0x0000ff00)); \ 109 smtc_im_backstop(irq);
102} while (0) 110 return ret;
103 111}
104#define __NO_AFFINITY_IRQ_SMTC_HOOK(irq) \
105do { \
106 if (irq_hwmask[irq] & 0x0000ff00) \
107 write_c0_tccontext(read_c0_tccontext() & \
108 ~(irq_hwmask[irq] & 0x0000ff00)); \
109} while (0)
110 112
111#else 113#else
112 114
113#define __DO_IRQ_SMTC_HOOK(irq) \ 115static inline void smtc_im_backstop(unsigned int irq) { }
114do { \ 116static inline int smtc_handle_on_other_cpu(unsigned int irq)
115 IRQ_AFFINITY_HOOK(irq); \ 117{
116} while (0) 118 return handle_on_other_cpu(irq);
117#define __NO_AFFINITY_IRQ_SMTC_HOOK(irq) do { } while (0) 119}
118 120
119#endif 121#endif
120 122
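Turning the SMTC hooks into static inline functions lets callers use an ordinary early exit instead of a macro that hid a return statement. A sketch of the resulting call shape inside a dispatch routine; the surrounding function is illustrative, only the two helpers come from this header.

#include <linux/hardirq.h>
#include <linux/irq.h>
#include <asm/irq.h>

static void example_do_irq(unsigned int irq)
{
	irq_enter();

	if (smtc_handle_on_other_cpu(irq))
		goto out;	/* forwarded to the VPE that owns this line */

	/* IM backstop handled by the helper; process the IRQ locally */
	generic_handle_irq(irq);
out:
	irq_exit();
}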
diff --git a/arch/mips/include/asm/irqflags.h b/arch/mips/include/asm/irqflags.h
index 9ef3b0d1789..309cbcd6909 100644
--- a/arch/mips/include/asm/irqflags.h
+++ b/arch/mips/include/asm/irqflags.h
@@ -174,7 +174,7 @@ __asm__(
174 "mtc0 \\flags, $2, 1 \n" 174 "mtc0 \\flags, $2, 1 \n"
175#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU) 175#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
176 /* 176 /*
177 * Slow, but doesn't suffer from a relativly unlikely race 177 * Slow, but doesn't suffer from a relatively unlikely race
178 * condition we're having since days 1. 178 * condition we're having since days 1.
179 */ 179 */
180 " beqz \\flags, 1f \n" 180 " beqz \\flags, 1f \n"
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h b/arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h
index 5325084d5c4..32978d32561 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h
@@ -4,7 +4,7 @@
4#define TAGVER_LEN 4 /* Length of Tag Version */ 4#define TAGVER_LEN 4 /* Length of Tag Version */
5#define TAGLAYOUT_LEN 4 /* Length of FlashLayoutVer */ 5#define TAGLAYOUT_LEN 4 /* Length of FlashLayoutVer */
6#define SIG1_LEN 20 /* Company Signature 1 Length */ 6#define SIG1_LEN 20 /* Company Signature 1 Length */
7#define SIG2_LEN 14 /* Company Signature 2 Lenght */ 7#define SIG2_LEN 14 /* Company Signature 2 Length */
8#define BOARDID_LEN 16 /* Length of BoardId */ 8#define BOARDID_LEN 16 /* Length of BoardId */
9#define ENDIANFLAG_LEN 2 /* Endian Flag Length */ 9#define ENDIANFLAG_LEN 2 /* Endian Flag Length */
10#define CHIPID_LEN 6 /* Chip Id Length */ 10#define CHIPID_LEN 6 /* Chip Id Length */
diff --git a/arch/mips/include/asm/mach-cavium-octeon/irq.h b/arch/mips/include/asm/mach-cavium-octeon/irq.h
index 6ddab8aef64..5b05f186e39 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/irq.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/irq.h
@@ -11,172 +11,91 @@
11#define NR_IRQS OCTEON_IRQ_LAST 11#define NR_IRQS OCTEON_IRQ_LAST
12#define MIPS_CPU_IRQ_BASE OCTEON_IRQ_SW0 12#define MIPS_CPU_IRQ_BASE OCTEON_IRQ_SW0
13 13
14/* 0 - 7 represent the i8259 master */ 14enum octeon_irq {
15#define OCTEON_IRQ_I8259M0 0 15/* 1 - 8 represent the 8 MIPS standard interrupt sources */
16#define OCTEON_IRQ_I8259M1 1 16 OCTEON_IRQ_SW0 = 1,
17#define OCTEON_IRQ_I8259M2 2 17 OCTEON_IRQ_SW1,
 18#define OCTEON_IRQ_I8259M3 3 18/* CIU0, CIU2, CIU4 are 3, 4, 5 */
19#define OCTEON_IRQ_I8259M4 4 19 OCTEON_IRQ_5 = 6,
20#define OCTEON_IRQ_I8259M5 5 20 OCTEON_IRQ_PERF,
21#define OCTEON_IRQ_I8259M6 6 21 OCTEON_IRQ_TIMER,
22#define OCTEON_IRQ_I8259M7 7 22/* sources in CIU_INTX_EN0 */
23/* 8 - 15 represent the i8259 slave */ 23 OCTEON_IRQ_WORKQ0,
24#define OCTEON_IRQ_I8259S0 8 24 OCTEON_IRQ_GPIO0 = OCTEON_IRQ_WORKQ0 + 16,
25#define OCTEON_IRQ_I8259S1 9 25 OCTEON_IRQ_WDOG0 = OCTEON_IRQ_GPIO0 + 16,
26#define OCTEON_IRQ_I8259S2 10 26 OCTEON_IRQ_WDOG15 = OCTEON_IRQ_WDOG0 + 15,
27#define OCTEON_IRQ_I8259S3 11 27 OCTEON_IRQ_MBOX0 = OCTEON_IRQ_WDOG0 + 16,
28#define OCTEON_IRQ_I8259S4 12 28 OCTEON_IRQ_MBOX1,
29#define OCTEON_IRQ_I8259S5 13 29 OCTEON_IRQ_UART0,
30#define OCTEON_IRQ_I8259S6 14 30 OCTEON_IRQ_UART1,
31#define OCTEON_IRQ_I8259S7 15 31 OCTEON_IRQ_UART2,
32/* 16 - 23 represent the 8 MIPS standard interrupt sources */ 32 OCTEON_IRQ_PCI_INT0,
33#define OCTEON_IRQ_SW0 16 33 OCTEON_IRQ_PCI_INT1,
34#define OCTEON_IRQ_SW1 17 34 OCTEON_IRQ_PCI_INT2,
35#define OCTEON_IRQ_CIU0 18 35 OCTEON_IRQ_PCI_INT3,
36#define OCTEON_IRQ_CIU1 19 36 OCTEON_IRQ_PCI_MSI0,
37#define OCTEON_IRQ_CIU4 20 37 OCTEON_IRQ_PCI_MSI1,
38#define OCTEON_IRQ_5 21 38 OCTEON_IRQ_PCI_MSI2,
39#define OCTEON_IRQ_PERF 22 39 OCTEON_IRQ_PCI_MSI3,
40#define OCTEON_IRQ_TIMER 23 40
41/* 24 - 87 represent the sources in CIU_INTX_EN0 */ 41 OCTEON_IRQ_TWSI,
42#define OCTEON_IRQ_WORKQ0 24 42 OCTEON_IRQ_TWSI2,
43#define OCTEON_IRQ_WORKQ1 25 43 OCTEON_IRQ_RML,
44#define OCTEON_IRQ_WORKQ2 26 44 OCTEON_IRQ_TRACE0,
45#define OCTEON_IRQ_WORKQ3 27 45 OCTEON_IRQ_GMX_DRP0 = OCTEON_IRQ_TRACE0 + 4,
46#define OCTEON_IRQ_WORKQ4 28 46 OCTEON_IRQ_IPD_DRP = OCTEON_IRQ_GMX_DRP0 + 5,
47#define OCTEON_IRQ_WORKQ5 29 47 OCTEON_IRQ_KEY_ZERO,
48#define OCTEON_IRQ_WORKQ6 30 48 OCTEON_IRQ_TIMER0,
49#define OCTEON_IRQ_WORKQ7 31 49 OCTEON_IRQ_TIMER1,
50#define OCTEON_IRQ_WORKQ8 32 50 OCTEON_IRQ_TIMER2,
51#define OCTEON_IRQ_WORKQ9 33 51 OCTEON_IRQ_TIMER3,
52#define OCTEON_IRQ_WORKQ10 34 52 OCTEON_IRQ_USB0,
53#define OCTEON_IRQ_WORKQ11 35 53 OCTEON_IRQ_USB1,
54#define OCTEON_IRQ_WORKQ12 36 54 OCTEON_IRQ_PCM,
55#define OCTEON_IRQ_WORKQ13 37 55 OCTEON_IRQ_MPI,
56#define OCTEON_IRQ_WORKQ14 38 56 OCTEON_IRQ_POWIQ,
57#define OCTEON_IRQ_WORKQ15 39 57 OCTEON_IRQ_IPDPPTHR,
58#define OCTEON_IRQ_GPIO0 40 58 OCTEON_IRQ_MII0,
59#define OCTEON_IRQ_GPIO1 41 59 OCTEON_IRQ_MII1,
60#define OCTEON_IRQ_GPIO2 42 60 OCTEON_IRQ_BOOTDMA,
61#define OCTEON_IRQ_GPIO3 43 61
62#define OCTEON_IRQ_GPIO4 44 62 OCTEON_IRQ_NAND,
63#define OCTEON_IRQ_GPIO5 45 63 OCTEON_IRQ_MIO, /* Summary of MIO_BOOT_ERR */
64#define OCTEON_IRQ_GPIO6 46 64 OCTEON_IRQ_IOB, /* Summary of IOB_INT_SUM */
65#define OCTEON_IRQ_GPIO7 47 65 OCTEON_IRQ_FPA, /* Summary of FPA_INT_SUM */
66#define OCTEON_IRQ_GPIO8 48 66 OCTEON_IRQ_POW, /* Summary of POW_ECC_ERR */
67#define OCTEON_IRQ_GPIO9 49 67 OCTEON_IRQ_L2C, /* Summary of L2C_INT_STAT */
68#define OCTEON_IRQ_GPIO10 50 68 OCTEON_IRQ_IPD, /* Summary of IPD_INT_SUM */
69#define OCTEON_IRQ_GPIO11 51 69 OCTEON_IRQ_PIP, /* Summary of PIP_INT_REG */
70#define OCTEON_IRQ_GPIO12 52 70 OCTEON_IRQ_PKO, /* Summary of PKO_REG_ERROR */
71#define OCTEON_IRQ_GPIO13 53 71 OCTEON_IRQ_ZIP, /* Summary of ZIP_ERROR */
72#define OCTEON_IRQ_GPIO14 54 72 OCTEON_IRQ_TIM, /* Summary of TIM_REG_ERROR */
73#define OCTEON_IRQ_GPIO15 55 73 OCTEON_IRQ_RAD, /* Summary of RAD_REG_ERROR */
74#define OCTEON_IRQ_MBOX0 56 74 OCTEON_IRQ_KEY, /* Summary of KEY_INT_SUM */
75#define OCTEON_IRQ_MBOX1 57 75 OCTEON_IRQ_DFA, /* Summary of DFA */
76#define OCTEON_IRQ_UART0 58 76 OCTEON_IRQ_USBCTL, /* Summary of USBN0_INT_SUM */
77#define OCTEON_IRQ_UART1 59 77 OCTEON_IRQ_SLI, /* Summary of SLI_INT_SUM */
78#define OCTEON_IRQ_PCI_INT0 60 78 OCTEON_IRQ_DPI, /* Summary of DPI_INT_SUM */
79#define OCTEON_IRQ_PCI_INT1 61 79 OCTEON_IRQ_AGX0, /* Summary of GMX0*+PCS0_INT*_REG */
80#define OCTEON_IRQ_PCI_INT2 62 80 OCTEON_IRQ_AGL = OCTEON_IRQ_AGX0 + 5,
81#define OCTEON_IRQ_PCI_INT3 63 81 OCTEON_IRQ_PTP,
82#define OCTEON_IRQ_PCI_MSI0 64 82 OCTEON_IRQ_PEM0,
83#define OCTEON_IRQ_PCI_MSI1 65 83 OCTEON_IRQ_PEM1,
84#define OCTEON_IRQ_PCI_MSI2 66 84 OCTEON_IRQ_SRIO0,
85#define OCTEON_IRQ_PCI_MSI3 67 85 OCTEON_IRQ_SRIO1,
86#define OCTEON_IRQ_RESERVED68 68 /* Summary of CIU_INT_SUM1 */ 86 OCTEON_IRQ_LMC0,
87#define OCTEON_IRQ_TWSI 69 87 OCTEON_IRQ_DFM = OCTEON_IRQ_LMC0 + 4, /* Summary of DFM */
88#define OCTEON_IRQ_RML 70 88 OCTEON_IRQ_RST,
89#define OCTEON_IRQ_TRACE 71 89};
90#define OCTEON_IRQ_GMX_DRP0 72
91#define OCTEON_IRQ_GMX_DRP1 73
92#define OCTEON_IRQ_IPD_DRP 74
93#define OCTEON_IRQ_KEY_ZERO 75
94#define OCTEON_IRQ_TIMER0 76
95#define OCTEON_IRQ_TIMER1 77
96#define OCTEON_IRQ_TIMER2 78
97#define OCTEON_IRQ_TIMER3 79
98#define OCTEON_IRQ_USB0 80
99#define OCTEON_IRQ_PCM 81
100#define OCTEON_IRQ_MPI 82
101#define OCTEON_IRQ_TWSI2 83
102#define OCTEON_IRQ_POWIQ 84
103#define OCTEON_IRQ_IPDPPTHR 85
104#define OCTEON_IRQ_MII0 86
105#define OCTEON_IRQ_BOOTDMA 87
106/* 88 - 151 represent the sources in CIU_INTX_EN1 */
107#define OCTEON_IRQ_WDOG0 88
108#define OCTEON_IRQ_WDOG1 89
109#define OCTEON_IRQ_WDOG2 90
110#define OCTEON_IRQ_WDOG3 91
111#define OCTEON_IRQ_WDOG4 92
112#define OCTEON_IRQ_WDOG5 93
113#define OCTEON_IRQ_WDOG6 94
114#define OCTEON_IRQ_WDOG7 95
115#define OCTEON_IRQ_WDOG8 96
116#define OCTEON_IRQ_WDOG9 97
117#define OCTEON_IRQ_WDOG10 98
118#define OCTEON_IRQ_WDOG11 99
119#define OCTEON_IRQ_WDOG12 100
120#define OCTEON_IRQ_WDOG13 101
121#define OCTEON_IRQ_WDOG14 102
122#define OCTEON_IRQ_WDOG15 103
123#define OCTEON_IRQ_UART2 104
124#define OCTEON_IRQ_USB1 105
125#define OCTEON_IRQ_MII1 106
126#define OCTEON_IRQ_RESERVED107 107
127#define OCTEON_IRQ_RESERVED108 108
128#define OCTEON_IRQ_RESERVED109 109
129#define OCTEON_IRQ_RESERVED110 110
130#define OCTEON_IRQ_RESERVED111 111
131#define OCTEON_IRQ_RESERVED112 112
132#define OCTEON_IRQ_RESERVED113 113
133#define OCTEON_IRQ_RESERVED114 114
134#define OCTEON_IRQ_RESERVED115 115
135#define OCTEON_IRQ_RESERVED116 116
136#define OCTEON_IRQ_RESERVED117 117
137#define OCTEON_IRQ_RESERVED118 118
138#define OCTEON_IRQ_RESERVED119 119
139#define OCTEON_IRQ_RESERVED120 120
140#define OCTEON_IRQ_RESERVED121 121
141#define OCTEON_IRQ_RESERVED122 122
142#define OCTEON_IRQ_RESERVED123 123
143#define OCTEON_IRQ_RESERVED124 124
144#define OCTEON_IRQ_RESERVED125 125
145#define OCTEON_IRQ_RESERVED126 126
146#define OCTEON_IRQ_RESERVED127 127
147#define OCTEON_IRQ_RESERVED128 128
148#define OCTEON_IRQ_RESERVED129 129
149#define OCTEON_IRQ_RESERVED130 130
150#define OCTEON_IRQ_RESERVED131 131
151#define OCTEON_IRQ_RESERVED132 132
152#define OCTEON_IRQ_RESERVED133 133
153#define OCTEON_IRQ_RESERVED134 134
154#define OCTEON_IRQ_RESERVED135 135
155#define OCTEON_IRQ_RESERVED136 136
156#define OCTEON_IRQ_RESERVED137 137
157#define OCTEON_IRQ_RESERVED138 138
158#define OCTEON_IRQ_RESERVED139 139
159#define OCTEON_IRQ_RESERVED140 140
160#define OCTEON_IRQ_RESERVED141 141
161#define OCTEON_IRQ_RESERVED142 142
162#define OCTEON_IRQ_RESERVED143 143
163#define OCTEON_IRQ_RESERVED144 144
164#define OCTEON_IRQ_RESERVED145 145
165#define OCTEON_IRQ_RESERVED146 146
166#define OCTEON_IRQ_RESERVED147 147
167#define OCTEON_IRQ_RESERVED148 148
168#define OCTEON_IRQ_RESERVED149 149
169#define OCTEON_IRQ_RESERVED150 150
170#define OCTEON_IRQ_RESERVED151 151
171 90
172#ifdef CONFIG_PCI_MSI 91#ifdef CONFIG_PCI_MSI
 173/* 152 - 215 represent the MSI interrupts 0-63 */ 92/* MSI interrupts 0-255 follow OCTEON_IRQ_RST */
174#define OCTEON_IRQ_MSI_BIT0 152 93#define OCTEON_IRQ_MSI_BIT0 (OCTEON_IRQ_RST + 1)
175#define OCTEON_IRQ_MSI_LAST (OCTEON_IRQ_MSI_BIT0 + 255)
176 94
177#define OCTEON_IRQ_LAST (OCTEON_IRQ_MSI_LAST + 1) 95#define OCTEON_IRQ_MSI_LAST (OCTEON_IRQ_MSI_BIT0 + 255)
96#define OCTEON_IRQ_LAST (OCTEON_IRQ_MSI_LAST + 1)
178#else 97#else
179#define OCTEON_IRQ_LAST 152 98#define OCTEON_IRQ_LAST (OCTEON_IRQ_RST + 1)
180#endif 99#endif
181 100
182#endif 101#endif
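With the fixed table collapsed into an enum, the MSI block starts one past OCTEON_IRQ_RST, so (under CONFIG_PCI_MSI) an MSI bit index and a Linux IRQ number convert by a constant offset. A small sketch, assuming this header is included; the helper names are invented for illustration.

#include <asm/irq.h>

#ifdef CONFIG_PCI_MSI
static inline unsigned int example_msi_bit_to_irq(unsigned int bit)
{
	return OCTEON_IRQ_MSI_BIT0 + bit;	/* bit in 0..255 */
}

static inline unsigned int example_irq_to_msi_bit(unsigned int irq)
{
	return irq - OCTEON_IRQ_MSI_BIT0;
}
#endif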
diff --git a/arch/mips/include/asm/mach-ip32/mc146818rtc.h b/arch/mips/include/asm/mach-ip32/mc146818rtc.h
index c28ba8d8407..6b6bab43d5c 100644
--- a/arch/mips/include/asm/mach-ip32/mc146818rtc.h
+++ b/arch/mips/include/asm/mach-ip32/mc146818rtc.h
@@ -26,7 +26,7 @@ static inline void CMOS_WRITE(unsigned char data, unsigned long addr)
26} 26}
27 27
28/* 28/*
29 * FIXME: Do it right. For now just assume that noone lives in 20th century 29 * FIXME: Do it right. For now just assume that no one lives in 20th century
 30 * and no O2 user in 22nd century ;-) 30
31 */ 31 */
32#define mc146818_decode_year(year) ((year) + 2000) 32#define mc146818_decode_year(year) ((year) + 2000)
diff --git a/arch/mips/include/asm/mach-jz4740/platform.h b/arch/mips/include/asm/mach-jz4740/platform.h
index 8987a76e967..564ab81d6cd 100644
--- a/arch/mips/include/asm/mach-jz4740/platform.h
+++ b/arch/mips/include/asm/mach-jz4740/platform.h
@@ -30,6 +30,7 @@ extern struct platform_device jz4740_i2s_device;
30extern struct platform_device jz4740_pcm_device; 30extern struct platform_device jz4740_pcm_device;
31extern struct platform_device jz4740_codec_device; 31extern struct platform_device jz4740_codec_device;
32extern struct platform_device jz4740_adc_device; 32extern struct platform_device jz4740_adc_device;
33extern struct platform_device jz4740_wdt_device;
33 34
34void jz4740_serial_device_register(void); 35void jz4740_serial_device_register(void);
35 36
diff --git a/arch/mips/include/asm/mach-loongson/cs5536/cs5536.h b/arch/mips/include/asm/mach-loongson/cs5536/cs5536.h
index 021f77ca59e..2a8e2bb5d53 100644
--- a/arch/mips/include/asm/mach-loongson/cs5536/cs5536.h
+++ b/arch/mips/include/asm/mach-loongson/cs5536/cs5536.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * The header file of cs5536 sourth bridge. 2 * The header file of cs5536 south bridge.
3 * 3 *
4 * Copyright (C) 2007 Lemote, Inc. 4 * Copyright (C) 2007 Lemote, Inc.
5 * Author : jlliu <liujl@lemote.com> 5 * Author : jlliu <liujl@lemote.com>
diff --git a/arch/mips/include/asm/mach-pb1x00/pb1000.h b/arch/mips/include/asm/mach-pb1x00/pb1000.h
index 6d1ff9060e4..65059255dc1 100644
--- a/arch/mips/include/asm/mach-pb1x00/pb1000.h
+++ b/arch/mips/include/asm/mach-pb1x00/pb1000.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Alchemy Semi Pb1000 Referrence Board 2 * Alchemy Semi Pb1000 Reference Board
3 * 3 *
4 * Copyright 2001, 2008 MontaVista Software Inc. 4 * Copyright 2001, 2008 MontaVista Software Inc.
5 * Author: MontaVista Software, Inc. <source@mvista.com> 5 * Author: MontaVista Software, Inc. <source@mvista.com>
diff --git a/arch/mips/include/asm/mach-pb1x00/pb1200.h b/arch/mips/include/asm/mach-pb1x00/pb1200.h
index 962eb55dc88..fce4332ebb7 100644
--- a/arch/mips/include/asm/mach-pb1x00/pb1200.h
+++ b/arch/mips/include/asm/mach-pb1x00/pb1200.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * AMD Alchemy Pb1200 Referrence Board 2 * AMD Alchemy Pb1200 Reference Board
3 * Board Registers defines. 3 * Board Registers defines.
4 * 4 *
5 * ######################################################################## 5 * ########################################################################
diff --git a/arch/mips/include/asm/mach-pb1x00/pb1550.h b/arch/mips/include/asm/mach-pb1x00/pb1550.h
index fc4d766641c..f835c88e959 100644
--- a/arch/mips/include/asm/mach-pb1x00/pb1550.h
+++ b/arch/mips/include/asm/mach-pb1x00/pb1550.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * AMD Alchemy Semi PB1550 Referrence Board 2 * AMD Alchemy Semi PB1550 Reference Board
3 * Board Registers defines. 3 * Board Registers defines.
4 * 4 *
5 * Copyright 2004 Embedded Edge LLC. 5 * Copyright 2004 Embedded Edge LLC.
diff --git a/arch/mips/include/asm/mach-powertv/dma-coherence.h b/arch/mips/include/asm/mach-powertv/dma-coherence.h
index f76029c2406..a8e72cf1214 100644
--- a/arch/mips/include/asm/mach-powertv/dma-coherence.h
+++ b/arch/mips/include/asm/mach-powertv/dma-coherence.h
@@ -48,7 +48,7 @@ static inline unsigned long virt_to_phys_from_pte(void *addr)
48 /* check for a valid page */ 48 /* check for a valid page */
49 if (pte_present(pte)) { 49 if (pte_present(pte)) {
50 /* get the physical address the page is 50 /* get the physical address the page is
51 * refering to */ 51 * referring to */
52 phys_addr = (unsigned long) 52 phys_addr = (unsigned long)
53 page_to_phys(pte_page(pte)); 53 page_to_phys(pte_page(pte));
54 /* add the offset within the page */ 54 /* add the offset within the page */
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 4d987097538..6a6f8a8f542 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -922,7 +922,7 @@ do { \
922#define write_c0_config7(val) __write_32bit_c0_register($16, 7, val) 922#define write_c0_config7(val) __write_32bit_c0_register($16, 7, val)
923 923
924/* 924/*
925 * The WatchLo register. There may be upto 8 of them. 925 * The WatchLo register. There may be up to 8 of them.
926 */ 926 */
927#define read_c0_watchlo0() __read_ulong_c0_register($18, 0) 927#define read_c0_watchlo0() __read_ulong_c0_register($18, 0)
928#define read_c0_watchlo1() __read_ulong_c0_register($18, 1) 928#define read_c0_watchlo1() __read_ulong_c0_register($18, 1)
@@ -942,7 +942,7 @@ do { \
942#define write_c0_watchlo7(val) __write_ulong_c0_register($18, 7, val) 942#define write_c0_watchlo7(val) __write_ulong_c0_register($18, 7, val)
943 943
944/* 944/*
945 * The WatchHi register. There may be upto 8 of them. 945 * The WatchHi register. There may be up to 8 of them.
946 */ 946 */
947#define read_c0_watchhi0() __read_32bit_c0_register($19, 0) 947#define read_c0_watchhi0() __read_32bit_c0_register($19, 0)
948#define read_c0_watchhi1() __read_32bit_c0_register($19, 1) 948#define read_c0_watchhi1() __read_32bit_c0_register($19, 1)
diff --git a/arch/mips/include/asm/octeon/cvmx-bootinfo.h b/arch/mips/include/asm/octeon/cvmx-bootinfo.h
index f3c23a43f84..4e4c3a8282d 100644
--- a/arch/mips/include/asm/octeon/cvmx-bootinfo.h
+++ b/arch/mips/include/asm/octeon/cvmx-bootinfo.h
@@ -200,7 +200,7 @@ enum cvmx_chip_types_enum {
200 CVMX_CHIP_TYPE_MAX, 200 CVMX_CHIP_TYPE_MAX,
201}; 201};
202 202
203/* Compatability alias for NAC38 name change, planned to be removed 203/* Compatibility alias for NAC38 name change, planned to be removed
204 * from SDK 1.7 */ 204 * from SDK 1.7 */
205#define CVMX_BOARD_TYPE_NAO38 CVMX_BOARD_TYPE_NAC38 205#define CVMX_BOARD_TYPE_NAO38 CVMX_BOARD_TYPE_NAC38
206 206
diff --git a/arch/mips/include/asm/octeon/cvmx-bootmem.h b/arch/mips/include/asm/octeon/cvmx-bootmem.h
index 8e708bdb43f..877845b84b1 100644
--- a/arch/mips/include/asm/octeon/cvmx-bootmem.h
+++ b/arch/mips/include/asm/octeon/cvmx-bootmem.h
@@ -67,7 +67,7 @@ struct cvmx_bootmem_block_header {
67 67
68/* 68/*
69 * Structure for named memory blocks. Number of descriptors available 69 * Structure for named memory blocks. Number of descriptors available
70 * can be changed without affecting compatiblity, but name length 70 * can be changed without affecting compatibility, but name length
71 * changes require a bump in the bootmem descriptor version Note: This 71 * changes require a bump in the bootmem descriptor version Note: This
72 * structure must be naturally 64 bit aligned, as a single memory 72 * structure must be naturally 64 bit aligned, as a single memory
73 * image will be used by both 32 and 64 bit programs. 73 * image will be used by both 32 and 64 bit programs.
diff --git a/arch/mips/include/asm/octeon/cvmx-l2c.h b/arch/mips/include/asm/octeon/cvmx-l2c.h
index 0b32c5b118e..2c8ff9e33ec 100644
--- a/arch/mips/include/asm/octeon/cvmx-l2c.h
+++ b/arch/mips/include/asm/octeon/cvmx-l2c.h
@@ -157,7 +157,7 @@ enum cvmx_l2c_tad_event {
157 157
158/** 158/**
159 * Configure one of the four L2 Cache performance counters to capture event 159 * Configure one of the four L2 Cache performance counters to capture event
160 * occurences. 160 * occurrences.
161 * 161 *
162 * @counter: The counter to configure. Range 0..3. 162 * @counter: The counter to configure. Range 0..3.
163 * @event: The type of L2 Cache event occurrence to count. 163 * @event: The type of L2 Cache event occurrence to count.
diff --git a/arch/mips/include/asm/octeon/cvmx.h b/arch/mips/include/asm/octeon/cvmx.h
index 9d9381e2e3d..7e1286706d4 100644
--- a/arch/mips/include/asm/octeon/cvmx.h
+++ b/arch/mips/include/asm/octeon/cvmx.h
@@ -151,7 +151,7 @@ enum cvmx_mips_space {
151#endif 151#endif
152 152
153/** 153/**
154 * Convert a memory pointer (void*) into a hardware compatable 154 * Convert a memory pointer (void*) into a hardware compatible
155 * memory address (uint64_t). Octeon hardware widgets don't 155 * memory address (uint64_t). Octeon hardware widgets don't
156 * understand logical addresses. 156 * understand logical addresses.
157 * 157 *
diff --git a/arch/mips/include/asm/octeon/octeon.h b/arch/mips/include/asm/octeon/octeon.h
index 6b34afd0d4e..f72f768cd3a 100644
--- a/arch/mips/include/asm/octeon/octeon.h
+++ b/arch/mips/include/asm/octeon/octeon.h
@@ -257,4 +257,6 @@ extern struct cvmx_bootinfo *octeon_bootinfo;
257 257
258extern uint64_t octeon_bootloader_entry_addr; 258extern uint64_t octeon_bootloader_entry_addr;
259 259
260extern void (*octeon_irq_setup_secondary)(void);
261
260#endif /* __ASM_OCTEON_OCTEON_H */ 262#endif /* __ASM_OCTEON_OCTEON_H */
diff --git a/arch/mips/include/asm/paccess.h b/arch/mips/include/asm/paccess.h
index c2394f8b0fe..9ce5a1e7e14 100644
--- a/arch/mips/include/asm/paccess.h
+++ b/arch/mips/include/asm/paccess.h
@@ -7,7 +7,7 @@
7 * Copyright (C) 1999, 2000 Silicon Graphics, Inc. 7 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
8 * 8 *
9 * Protected memory access. Used for everything that might take revenge 9 * Protected memory access. Used for everything that might take revenge
10 * by sending a DBE error like accessing possibly non-existant memory or 10 * by sending a DBE error like accessing possibly non-existent memory or
11 * devices. 11 * devices.
12 */ 12 */
13#ifndef _ASM_PACCESS_H 13#ifndef _ASM_PACCESS_H
diff --git a/arch/mips/include/asm/pci/bridge.h b/arch/mips/include/asm/pci/bridge.h
index f1f508e4f97..be44fb0266d 100644
--- a/arch/mips/include/asm/pci/bridge.h
+++ b/arch/mips/include/asm/pci/bridge.h
@@ -262,7 +262,7 @@ typedef volatile struct bridge_s {
262} bridge_t; 262} bridge_t;
263 263
264/* 264/*
265 * Field formats for Error Command Word and Auxillary Error Command Word 265 * Field formats for Error Command Word and Auxiliary Error Command Word
266 * of bridge. 266 * of bridge.
267 */ 267 */
268typedef struct bridge_err_cmdword_s { 268typedef struct bridge_err_cmdword_s {
diff --git a/arch/mips/include/asm/perf_event.h b/arch/mips/include/asm/perf_event.h
index e00007cf816..d0c77496c72 100644
--- a/arch/mips/include/asm/perf_event.h
+++ b/arch/mips/include/asm/perf_event.h
@@ -11,15 +11,5 @@
11 11
12#ifndef __MIPS_PERF_EVENT_H__ 12#ifndef __MIPS_PERF_EVENT_H__
13#define __MIPS_PERF_EVENT_H__ 13#define __MIPS_PERF_EVENT_H__
14 14/* Leave it empty here. The file is required by linux/perf_event.h */
15/*
16 * MIPS performance counters do not raise NMI upon overflow, a regular
17 * interrupt will be signaled. Hence we can do the pending perf event
18 * work at the tail of the irq handler.
19 */
20static inline void
21set_perf_event_pending(void)
22{
23}
24
25#endif /* __MIPS_PERF_EVENT_H__ */ 15#endif /* __MIPS_PERF_EVENT_H__ */
diff --git a/arch/mips/include/asm/pmc-sierra/msp71xx/cpu-feature-overrides.h b/arch/mips/include/asm/pmc-sierra/msp71xx/cpu-feature-overrides.h
new file mode 100644
index 00000000000..a80801b094b
--- /dev/null
+++ b/arch/mips/include/asm/pmc-sierra/msp71xx/cpu-feature-overrides.h
@@ -0,0 +1,21 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2003, 04, 07 Ralf Baechle (ralf@linux-mips.org)
7 */
8#ifndef __ASM_MACH_MSP71XX_CPU_FEATURE_OVERRIDES_H
9#define __ASM_MACH_MSP71XX_CPU_FEATURE_OVERRIDES_H
10
11#define cpu_has_mips16 1
12#define cpu_has_dsp 1
13#define cpu_has_mipsmt 1
14#define cpu_has_fpu 0
15
16#define cpu_has_mips32r1 0
17#define cpu_has_mips32r2 1
18#define cpu_has_mips64r1 0
19#define cpu_has_mips64r2 0
20
21#endif /* __ASM_MACH_MSP71XX_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/pmc-sierra/msp71xx/msp_gpio_macros.h b/arch/mips/include/asm/pmc-sierra/msp71xx/msp_gpio_macros.h
new file mode 100644
index 00000000000..156f320c69e
--- /dev/null
+++ b/arch/mips/include/asm/pmc-sierra/msp71xx/msp_gpio_macros.h
@@ -0,0 +1,343 @@
1/*
2 *
3 * Macros for external SMP-safe access to the PMC MSP71xx reference
4 * board GPIO pins
5 *
6 * Copyright 2010 PMC-Sierra, Inc.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
14 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
15 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
16 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
17 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
18 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
19 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
20 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
21 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
22 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
23 *
24 * You should have received a copy of the GNU General Public License along
25 * with this program; if not, write to the Free Software Foundation, Inc.,
26 * 675 Mass Ave, Cambridge, MA 02139, USA.
27 */
28
29#ifndef __MSP_GPIO_MACROS_H__
30#define __MSP_GPIO_MACROS_H__
31
32#include <msp_regops.h>
33#include <msp_regs.h>
34
35#ifdef CONFIG_PMC_MSP7120_GW
36#define MSP_NUM_GPIOS 20
37#else
38#define MSP_NUM_GPIOS 28
39#endif
40
41/* -- GPIO Enumerations -- */
42enum msp_gpio_data {
43 MSP_GPIO_LO = 0,
44 MSP_GPIO_HI = 1,
45 MSP_GPIO_NONE, /* Special - Means pin is out of range */
46 MSP_GPIO_TOGGLE, /* Special - Sets pin to opposite */
47};
48
49enum msp_gpio_mode {
50 MSP_GPIO_INPUT = 0x0,
 51	/* MSP_GPIO_INTERRUPT = 0x1, Not supported yet */
52 MSP_GPIO_UART_INPUT = 0x2, /* Only GPIO 4 or 5 */
53 MSP_GPIO_OUTPUT = 0x8,
54 MSP_GPIO_UART_OUTPUT = 0x9, /* Only GPIO 2 or 3 */
55 MSP_GPIO_PERIF_TIMERA = 0x9, /* Only GPIO 0 or 1 */
56 MSP_GPIO_PERIF_TIMERB = 0xa, /* Only GPIO 0 or 1 */
57 MSP_GPIO_UNKNOWN = 0xb, /* No such GPIO or mode */
58};
59
60/* -- Static Tables -- */
61
62/* Maps pins to data register */
63static volatile u32 * const MSP_GPIO_DATA_REGISTER[] = {
64 /* GPIO 0 and 1 on the first register */
65 GPIO_DATA1_REG, GPIO_DATA1_REG,
66 /* GPIO 2, 3, 4, and 5 on the second register */
67 GPIO_DATA2_REG, GPIO_DATA2_REG, GPIO_DATA2_REG, GPIO_DATA2_REG,
68 /* GPIO 6, 7, 8, and 9 on the third register */
69 GPIO_DATA3_REG, GPIO_DATA3_REG, GPIO_DATA3_REG, GPIO_DATA3_REG,
70 /* GPIO 10, 11, 12, 13, 14, and 15 on the fourth register */
71 GPIO_DATA4_REG, GPIO_DATA4_REG, GPIO_DATA4_REG, GPIO_DATA4_REG,
72 GPIO_DATA4_REG, GPIO_DATA4_REG,
73 /* GPIO 16 - 23 on the first strange EXTENDED register */
74 EXTENDED_GPIO1_REG, EXTENDED_GPIO1_REG, EXTENDED_GPIO1_REG,
75 EXTENDED_GPIO1_REG, EXTENDED_GPIO1_REG, EXTENDED_GPIO1_REG,
76 EXTENDED_GPIO1_REG, EXTENDED_GPIO1_REG,
77 /* GPIO 24 - 27 on the second strange EXTENDED register */
78 EXTENDED_GPIO2_REG, EXTENDED_GPIO2_REG, EXTENDED_GPIO2_REG,
79 EXTENDED_GPIO2_REG,
80};
81
82/* Maps pins to mode register */
83static volatile u32 * const MSP_GPIO_MODE_REGISTER[] = {
84 /* GPIO 0 and 1 on the first register */
85 GPIO_CFG1_REG, GPIO_CFG1_REG,
86 /* GPIO 2, 3, 4, and 5 on the second register */
87 GPIO_CFG2_REG, GPIO_CFG2_REG, GPIO_CFG2_REG, GPIO_CFG2_REG,
88 /* GPIO 6, 7, 8, and 9 on the third register */
89 GPIO_CFG3_REG, GPIO_CFG3_REG, GPIO_CFG3_REG, GPIO_CFG3_REG,
90 /* GPIO 10, 11, 12, 13, 14, and 15 on the fourth register */
91 GPIO_CFG4_REG, GPIO_CFG4_REG, GPIO_CFG4_REG, GPIO_CFG4_REG,
92 GPIO_CFG4_REG, GPIO_CFG4_REG,
93 /* GPIO 16 - 23 on the first strange EXTENDED register */
94 EXTENDED_GPIO1_REG, EXTENDED_GPIO1_REG, EXTENDED_GPIO1_REG,
95 EXTENDED_GPIO1_REG, EXTENDED_GPIO1_REG, EXTENDED_GPIO1_REG,
96 EXTENDED_GPIO1_REG, EXTENDED_GPIO1_REG,
97 /* GPIO 24 - 27 on the second strange EXTENDED register */
98 EXTENDED_GPIO2_REG, EXTENDED_GPIO2_REG, EXTENDED_GPIO2_REG,
99 EXTENDED_GPIO2_REG,
100};
101
102/* Maps 'basic' pins to relative offset from 0 per register */
103static int MSP_GPIO_OFFSET[] = {
104 /* GPIO 0 and 1 on the first register */
105 0, 0,
106 /* GPIO 2, 3, 4, and 5 on the second register */
107 2, 2, 2, 2,
108 /* GPIO 6, 7, 8, and 9 on the third register */
109 6, 6, 6, 6,
110 /* GPIO 10, 11, 12, 13, 14, and 15 on the fourth register */
111 10, 10, 10, 10, 10, 10,
112};
113
114/* Maps MODE to allowed pin mask */
115static unsigned int MSP_GPIO_MODE_ALLOWED[] = {
116 0xffffffff, /* Mode 0 - INPUT */
117 0x00000, /* Mode 1 - INTERRUPT */
118 0x00030, /* Mode 2 - UART_INPUT (GPIO 4, 5)*/
119 0, 0, 0, 0, 0, /* Modes 3, 4, 5, 6, and 7 are reserved */
120 0xffffffff, /* Mode 8 - OUTPUT */
121 0x0000f, /* Mode 9 - UART_OUTPUT/
 122				PERIF_TIMERA (GPIO 0, 1, 2, 3) */
 123	0x00003,	/* Mode a - PERIF_TIMERB (GPIO 0, 1) */
124 0x00000, /* Mode b - Not really a mode! */
125};
126
127/* -- Bit masks -- */
128
129/* This gives you the 'register relative offset gpio' number */
130#define OFFSET_GPIO_NUMBER(gpio) (gpio - MSP_GPIO_OFFSET[gpio])
131
132/* These take the 'register relative offset gpio' number */
133#define BASIC_DATA_REG_MASK(ogpio) (1 << ogpio)
134#define BASIC_MODE_REG_VALUE(mode, ogpio) \
135 (mode << BASIC_MODE_REG_SHIFT(ogpio))
136#define BASIC_MODE_REG_MASK(ogpio) \
137 BASIC_MODE_REG_VALUE(0xf, ogpio)
138#define BASIC_MODE_REG_SHIFT(ogpio) (ogpio * 4)
139#define BASIC_MODE_REG_FROM_REG(data, ogpio) \
140 ((data & BASIC_MODE_REG_MASK(ogpio)) >> BASIC_MODE_REG_SHIFT(ogpio))
141
142/* These take the actual GPIO number (0 through 15) */
143#define BASIC_DATA_MASK(gpio) \
144 BASIC_DATA_REG_MASK(OFFSET_GPIO_NUMBER(gpio))
145#define BASIC_MODE_MASK(gpio) \
146 BASIC_MODE_REG_MASK(OFFSET_GPIO_NUMBER(gpio))
147#define BASIC_MODE(mode, gpio) \
148 BASIC_MODE_REG_VALUE(mode, OFFSET_GPIO_NUMBER(gpio))
149#define BASIC_MODE_SHIFT(gpio) \
150 BASIC_MODE_REG_SHIFT(OFFSET_GPIO_NUMBER(gpio))
151#define BASIC_MODE_FROM_REG(data, gpio) \
152 BASIC_MODE_REG_FROM_REG(data, OFFSET_GPIO_NUMBER(gpio))
153
154/*
155 * Each extended GPIO register is 32 bits long and is responsible for up to
156 * eight GPIOs. The least significant 16 bits contain the set and clear bit
157 * pair for each of the GPIOs. The most significant 16 bits contain the
158 * disable and enable bit pair for each of the GPIOs. For example, the
159 * extended GPIO reg for GPIOs 16-23 is as follows:
160 *
161 * 31: GPIO23_DISABLE
162 * ...
163 * 19: GPIO17_DISABLE
164 * 18: GPIO17_ENABLE
165 * 17: GPIO16_DISABLE
166 * 16: GPIO16_ENABLE
167 * ...
168 * 3: GPIO17_SET
169 * 2: GPIO17_CLEAR
170 * 1: GPIO16_SET
171 * 0: GPIO16_CLEAR
172 */
173
174/* This gives the 'register relative offset gpio' number */
175#define EXTENDED_OFFSET_GPIO(gpio) (gpio < 24 ? gpio - 16 : gpio - 24)
176
177/* These take the 'register relative offset gpio' number */
178#define EXTENDED_REG_DISABLE(ogpio) (0x2 << ((ogpio * 2) + 16))
179#define EXTENDED_REG_ENABLE(ogpio) (0x1 << ((ogpio * 2) + 16))
180#define EXTENDED_REG_SET(ogpio) (0x2 << (ogpio * 2))
181#define EXTENDED_REG_CLR(ogpio) (0x1 << (ogpio * 2))
182
183/* These take the actual GPIO number (16 through 27) */
184#define EXTENDED_DISABLE(gpio) \
185 EXTENDED_REG_DISABLE(EXTENDED_OFFSET_GPIO(gpio))
186#define EXTENDED_ENABLE(gpio) \
187 EXTENDED_REG_ENABLE(EXTENDED_OFFSET_GPIO(gpio))
188#define EXTENDED_SET(gpio) \
189 EXTENDED_REG_SET(EXTENDED_OFFSET_GPIO(gpio))
190#define EXTENDED_CLR(gpio) \
191 EXTENDED_REG_CLR(EXTENDED_OFFSET_GPIO(gpio))
192
193#define EXTENDED_FULL_MASK (0xffffffff)
194
195/* -- API inline-functions -- */
196
197/*
198 * Gets the current value of the specified pin
199 */
200static inline enum msp_gpio_data msp_gpio_pin_get(unsigned int gpio)
201{
202 u32 pinhi_mask = 0, pinhi_mask2 = 0;
203
204 if (gpio >= MSP_NUM_GPIOS)
205 return MSP_GPIO_NONE;
206
207 if (gpio < 16) {
208 pinhi_mask = BASIC_DATA_MASK(gpio);
209 } else {
210 /*
211 * Two cases are possible with the EXTENDED register:
212 * - In output mode (ENABLED flag set), check the CLR bit
213 * - In input mode (ENABLED flag not set), check the SET bit
214 */
215 pinhi_mask = EXTENDED_ENABLE(gpio) | EXTENDED_CLR(gpio);
216 pinhi_mask2 = EXTENDED_SET(gpio);
217 }
218 if (((*MSP_GPIO_DATA_REGISTER[gpio] & pinhi_mask) == pinhi_mask) ||
219 (*MSP_GPIO_DATA_REGISTER[gpio] & pinhi_mask2))
220 return MSP_GPIO_HI;
221 else
222 return MSP_GPIO_LO;
223}
224
225/* Sets the specified pin to the specified value */
226static inline void msp_gpio_pin_set(enum msp_gpio_data data, unsigned int gpio)
227{
228 if (gpio >= MSP_NUM_GPIOS)
229 return;
230
231 if (gpio < 16) {
232 if (data == MSP_GPIO_TOGGLE)
233 toggle_reg32(MSP_GPIO_DATA_REGISTER[gpio],
234 BASIC_DATA_MASK(gpio));
235 else if (data == MSP_GPIO_HI)
236 set_reg32(MSP_GPIO_DATA_REGISTER[gpio],
237 BASIC_DATA_MASK(gpio));
238 else
239 clear_reg32(MSP_GPIO_DATA_REGISTER[gpio],
240 BASIC_DATA_MASK(gpio));
241 } else {
242 if (data == MSP_GPIO_TOGGLE) {
243 /* Special ugly case:
244 * We have to read the CLR bit.
245 * If set, we write the CLR bit.
246 * If not, we write the SET bit.
247 */
248 u32 tmpdata;
249
250 custom_read_reg32(MSP_GPIO_DATA_REGISTER[gpio],
251 tmpdata);
252 if (tmpdata & EXTENDED_CLR(gpio))
253 tmpdata = EXTENDED_CLR(gpio);
254 else
255 tmpdata = EXTENDED_SET(gpio);
256 custom_write_reg32(MSP_GPIO_DATA_REGISTER[gpio],
257 tmpdata);
258 } else {
259 u32 newdata;
260
261 if (data == MSP_GPIO_HI)
262 newdata = EXTENDED_SET(gpio);
263 else
264 newdata = EXTENDED_CLR(gpio);
265 set_value_reg32(MSP_GPIO_DATA_REGISTER[gpio],
266 EXTENDED_FULL_MASK, newdata);
267 }
268 }
269}
270
271/* Sets the specified pin to the specified value */
272static inline void msp_gpio_pin_hi(unsigned int gpio)
273{
274 msp_gpio_pin_set(MSP_GPIO_HI, gpio);
275}
276
277/* Sets the specified pin to the specified value */
278static inline void msp_gpio_pin_lo(unsigned int gpio)
279{
280 msp_gpio_pin_set(MSP_GPIO_LO, gpio);
281}
282
283/* Sets the specified pin to the opposite value */
284static inline void msp_gpio_pin_toggle(unsigned int gpio)
285{
286 msp_gpio_pin_set(MSP_GPIO_TOGGLE, gpio);
287}
288
289/* Gets the mode of the specified pin */
290static inline enum msp_gpio_mode msp_gpio_pin_get_mode(unsigned int gpio)
291{
292 enum msp_gpio_mode retval = MSP_GPIO_UNKNOWN;
293 uint32_t data;
294
295 if (gpio >= MSP_NUM_GPIOS)
296 return retval;
297
298 data = *MSP_GPIO_MODE_REGISTER[gpio];
299
300 if (gpio < 16) {
301 retval = BASIC_MODE_FROM_REG(data, gpio);
302 } else {
303 /* Extended pins can only be either INPUT or OUTPUT */
304 if (data & EXTENDED_ENABLE(gpio))
305 retval = MSP_GPIO_OUTPUT;
306 else
307 retval = MSP_GPIO_INPUT;
308 }
309
310 return retval;
311}
312
313/*
314 * Sets the specified mode on the requested pin
315 * Returns 0 on success, or -1 if that mode is not allowed on this pin
316 */
317static inline int msp_gpio_pin_mode(enum msp_gpio_mode mode, unsigned int gpio)
318{
319 u32 modemask, newmode;
320
321 if ((1 << gpio) & ~MSP_GPIO_MODE_ALLOWED[mode])
322 return -1;
323
324 if (gpio >= MSP_NUM_GPIOS)
325 return -1;
326
327 if (gpio < 16) {
328 modemask = BASIC_MODE_MASK(gpio);
329 newmode = BASIC_MODE(mode, gpio);
330 } else {
331 modemask = EXTENDED_FULL_MASK;
332 if (mode == MSP_GPIO_INPUT)
333 newmode = EXTENDED_DISABLE(gpio);
334 else
335 newmode = EXTENDED_ENABLE(gpio);
336 }
337 /* Do the set atomically */
338 set_value_reg32(MSP_GPIO_MODE_REGISTER[gpio], modemask, newmode);
339
340 return 0;
341}
342
343#endif /* __MSP_GPIO_MACROS_H__ */
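
The EXTENDED_* helpers above encode each pin as a set/clear pair in the low 16 bits and an enable/disable pair in the high 16 bits of the extended register. The following standalone sketch re-declares the register-relative macros from this hunk (names copied from the header, nothing here touches hardware) and prints the masks for GPIO 17; the values should line up with the bit-layout table in the header comment (bits 2/3 and 18/19).

/* Userspace sketch of the EXTENDED_* bit math from msp_gpio_macros.h. */
#include <stdio.h>

#define EXTENDED_OFFSET_GPIO(gpio)  ((gpio) < 24 ? (gpio) - 16 : (gpio) - 24)
#define EXTENDED_REG_DISABLE(ogpio) (0x2 << (((ogpio) * 2) + 16))
#define EXTENDED_REG_ENABLE(ogpio)  (0x1 << (((ogpio) * 2) + 16))
#define EXTENDED_REG_SET(ogpio)     (0x2 << ((ogpio) * 2))
#define EXTENDED_REG_CLR(ogpio)     (0x1 << ((ogpio) * 2))

int main(void)
{
        unsigned int gpio = 17;                       /* should map to bits 2/3 and 18/19 */
        unsigned int o = EXTENDED_OFFSET_GPIO(gpio);  /* register-relative pin number */

        printf("GPIO%u: CLEAR=0x%08x SET=0x%08x ENABLE=0x%08x DISABLE=0x%08x\n",
               gpio,
               EXTENDED_REG_CLR(o), EXTENDED_REG_SET(o),
               EXTENDED_REG_ENABLE(o), EXTENDED_REG_DISABLE(o));
        return 0;
}
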
diff --git a/arch/mips/include/asm/pmc-sierra/msp71xx/msp_regops.h b/arch/mips/include/asm/pmc-sierra/msp71xx/msp_regops.h
index 60a5a38dd5b..7d41474e548 100644
--- a/arch/mips/include/asm/pmc-sierra/msp71xx/msp_regops.h
+++ b/arch/mips/include/asm/pmc-sierra/msp71xx/msp_regops.h
@@ -205,7 +205,7 @@ static inline u32 blocking_read_reg32(volatile u32 *const addr)
205 * custom_read_reg32(address, tmp); <-- Reads the address and put the value 205 * custom_read_reg32(address, tmp); <-- Reads the address and put the value
206 * in the 'tmp' variable given 206 * in the 'tmp' variable given
207 * 207 *
208 * From here on out, you are (basicly) atomic, so don't do anything too 208 * From here on out, you are (basically) atomic, so don't do anything too
209 * fancy! 209 * fancy!
210 * Also, this code may loop if the end of this block fails to write 210 * Also, this code may loop if the end of this block fails to write
 211 * everything back safely due to the other CPU, so do NOT do anything 211 * everything back safely due to the other CPU, so do NOT do anything
diff --git a/arch/mips/include/asm/pmc-sierra/msp71xx/msp_regs.h b/arch/mips/include/asm/pmc-sierra/msp71xx/msp_regs.h
index 603eb737b4a..692c1b658b9 100644
--- a/arch/mips/include/asm/pmc-sierra/msp71xx/msp_regs.h
+++ b/arch/mips/include/asm/pmc-sierra/msp71xx/msp_regs.h
@@ -91,12 +91,10 @@
91 /* MAC C device registers */ 91 /* MAC C device registers */
92#define MSP_ADSL2_BASE (MSP_MSB_BASE + 0xA80000) 92#define MSP_ADSL2_BASE (MSP_MSB_BASE + 0xA80000)
93 /* ADSL2 device registers */ 93 /* ADSL2 device registers */
94#define MSP_USB_BASE (MSP_MSB_BASE + 0xB40000) 94#define MSP_USB0_BASE (MSP_MSB_BASE + 0xB00000)
95 /* USB device registers */ 95 /* USB0 device registers */
96#define MSP_USB_BASE_START (MSP_MSB_BASE + 0xB40100) 96#define MSP_USB1_BASE (MSP_MSB_BASE + 0x300000)
97 /* USB device registers */ 97 /* USB1 device registers */
98#define MSP_USB_BASE_END (MSP_MSB_BASE + 0xB401FF)
99 /* USB device registers */
100#define MSP_CPUIF_BASE (MSP_MSB_BASE + 0xC00000) 98#define MSP_CPUIF_BASE (MSP_MSB_BASE + 0xC00000)
101 /* CPU interface registers */ 99 /* CPU interface registers */
102 100
@@ -319,8 +317,11 @@
319#define CPU_ERR2_REG regptr(MSP_SLP_BASE + 0x184) 317#define CPU_ERR2_REG regptr(MSP_SLP_BASE + 0x184)
320 /* CPU/SLP Error status 1 */ 318 /* CPU/SLP Error status 1 */
321 319
322#define EXTENDED_GPIO_REG regptr(MSP_SLP_BASE + 0x188) 320/* Extended GPIO registers */
323 /* Extended GPIO register */ 321#define EXTENDED_GPIO1_REG regptr(MSP_SLP_BASE + 0x188)
322#define EXTENDED_GPIO2_REG regptr(MSP_SLP_BASE + 0x18c)
323#define EXTENDED_GPIO_REG EXTENDED_GPIO1_REG
324 /* Backward-compatibility */
324 325
325/* System Error registers */ 326/* System Error registers */
326#define SLP_ERR_STS_REG regptr(MSP_SLP_BASE + 0x190) 327#define SLP_ERR_STS_REG regptr(MSP_SLP_BASE + 0x190)
diff --git a/arch/mips/include/asm/pmc-sierra/msp71xx/msp_usb.h b/arch/mips/include/asm/pmc-sierra/msp71xx/msp_usb.h
new file mode 100644
index 00000000000..4c9348df9df
--- /dev/null
+++ b/arch/mips/include/asm/pmc-sierra/msp71xx/msp_usb.h
@@ -0,0 +1,144 @@
1/******************************************************************
2 * Copyright (c) 2000-2007 PMC-Sierra INC.
3 *
4 * This program is free software; you can redistribute it
5 * and/or modify it under the terms of the GNU General
6 * Public License as published by the Free Software
7 * Foundation; either version 2 of the License, or (at your
8 * option) any later version.
9 *
10 * This program is distributed in the hope that it will be
11 * useful, but WITHOUT ANY WARRANTY; without even the implied
12 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
13 * PURPOSE. See the GNU General Public License for more
14 * details.
15 *
16 * You should have received a copy of the GNU General Public
17 * License along with this program; if not, write to the Free
18 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA
19 * 02139, USA.
20 *
21 * PMC-SIERRA INC. DISCLAIMS ANY LIABILITY OF ANY KIND
22 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS
23 * SOFTWARE.
24 */
25#ifndef MSP_USB_H_
26#define MSP_USB_H_
27
28#ifdef CONFIG_MSP_HAS_DUAL_USB
29#define NUM_USB_DEVS 2
30#else
31#define NUM_USB_DEVS 1
32#endif
33
34/* Register spaces for USB host 0 */
35#define MSP_USB0_MAB_START (MSP_USB0_BASE + 0x0)
36#define MSP_USB0_MAB_END (MSP_USB0_BASE + 0x17)
37#define MSP_USB0_ID_START (MSP_USB0_BASE + 0x40000)
38#define MSP_USB0_ID_END (MSP_USB0_BASE + 0x4008f)
39#define MSP_USB0_HS_START (MSP_USB0_BASE + 0x40100)
40#define MSP_USB0_HS_END (MSP_USB0_BASE + 0x401FF)
41
42/* Register spaces for USB host 1 */
43#define MSP_USB1_MAB_START (MSP_USB1_BASE + 0x0)
44#define MSP_USB1_MAB_END (MSP_USB1_BASE + 0x17)
45#define MSP_USB1_ID_START (MSP_USB1_BASE + 0x40000)
46#define MSP_USB1_ID_END (MSP_USB1_BASE + 0x4008f)
47#define MSP_USB1_HS_START (MSP_USB1_BASE + 0x40100)
48#define MSP_USB1_HS_END (MSP_USB1_BASE + 0x401ff)
49
50/* USB Identification registers */
51struct msp_usbid_regs {
52 u32 id; /* 0x0: Identification register */
53 u32 hwgen; /* 0x4: General HW params */
54 u32 hwhost; /* 0x8: Host HW params */
55 u32 hwdev; /* 0xc: Device HW params */
56 u32 hwtxbuf; /* 0x10: Tx buffer HW params */
57 u32 hwrxbuf; /* 0x14: Rx buffer HW params */
58 u32 reserved[26];
59 u32 timer0_load; /* 0x80: General-purpose timer 0 load*/
60 u32 timer0_ctrl; /* 0x84: General-purpose timer 0 control */
61 u32 timer1_load; /* 0x88: General-purpose timer 1 load*/
62 u32 timer1_ctrl; /* 0x8c: General-purpose timer 1 control */
63};
64
65/* MSBus to AMBA registers */
66struct msp_mab_regs {
67 u32 isr; /* 0x0: Interrupt status */
68 u32 imr; /* 0x4: Interrupt mask */
69 u32 thcr0; /* 0x8: Transaction header capture 0 */
70 u32 thcr1; /* 0xc: Transaction header capture 1 */
71 u32 int_stat; /* 0x10: Interrupt status summary */
72 u32 phy_cfg; /* 0x14: USB phy config */
73};
74
75/* EHCI registers */
76struct msp_usbhs_regs {
77 u32 hciver; /* 0x0: Version and offset to operational regs */
78 u32 hcsparams; /* 0x4: Host control structural parameters */
79 u32 hccparams; /* 0x8: Host control capability parameters */
80 u32 reserved0[5];
81 u32 dciver; /* 0x20: Device interface version */
82 u32 dccparams; /* 0x24: Device control capability parameters */
83 u32 reserved1[6];
84 u32 cmd; /* 0x40: USB command */
85 u32 sts; /* 0x44: USB status */
86 u32 int_ena; /* 0x48: USB interrupt enable */
87 u32 frindex; /* 0x4c: Frame index */
88 u32 reserved3;
89 union {
90 struct {
91 u32 flb_addr; /* 0x54: Frame list base address */
92 u32 next_async_addr; /* 0x58: next asynchronous addr */
93 u32 ttctrl; /* 0x5c: embedded transaction translator
94 async buffer status */
95 u32 burst_size; /* 0x60: Controller burst size */
96 u32 tx_fifo_ctrl; /* 0x64: Tx latency FIFO tuning */
97 u32 reserved0[4];
98 u32 endpt_nak; /* 0x78: Endpoint NAK */
99 u32 endpt_nak_ena; /* 0x7c: Endpoint NAK enable */
100 u32 cfg_flag; /* 0x80: Config flag */
101 u32 port_sc1; /* 0x84: Port status & control 1 */
102 u32 reserved1[7];
103 u32 otgsc; /* 0xa4: OTG status & control */
104 u32 mode; /* 0xa8: USB controller mode */
105 } host;
106
107 struct {
108 u32 dev_addr; /* 0x54: Device address */
109 u32 endpt_list_addr; /* 0x58: Endpoint list address */
110 u32 reserved0[7];
111 u32 endpt_nak; /* 0x74 */
112 u32 endpt_nak_ctrl; /* 0x78 */
113 u32 cfg_flag; /* 0x80 */
114 u32 port_sc1; /* 0x84: Port status & control 1 */
115 u32 reserved[7];
116 u32 otgsc; /* 0xa4: OTG status & control */
117 u32 mode; /* 0xa8: USB controller mode */
118 u32 endpt_setup_stat; /* 0xac */
119 u32 endpt_prime; /* 0xb0 */
120 u32 endpt_flush; /* 0xb4 */
121 u32 endpt_stat; /* 0xb8 */
122 u32 endpt_complete; /* 0xbc */
123 u32 endpt_ctrl0; /* 0xc0 */
124 u32 endpt_ctrl1; /* 0xc4 */
125 u32 endpt_ctrl2; /* 0xc8 */
126 u32 endpt_ctrl3; /* 0xcc */
127 } device;
128 } u;
129};
130/*
131 * Container for the more-generic platform_device.
132 * This exists mainly as a way to map the non-standard register
133 * spaces and make them accessible to the USB ISR.
134 */
135struct mspusb_device {
136 struct msp_mab_regs __iomem *mab_regs;
137 struct msp_usbid_regs __iomem *usbid_regs;
138 struct msp_usbhs_regs __iomem *usbhs_regs;
139 struct platform_device dev;
140};
141
142#define to_mspusb_device(x) container_of((x), struct mspusb_device, dev)
143#define TO_HOST_ID(x) ((x) & 0x3)
144#endif /*MSP_USB_H_*/
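
The new header wraps the generic platform_device inside struct mspusb_device so the MSBus, ID and EHCI register blocks travel with the device, and to_mspusb_device() is the usual container_of() recovery. Below is a minimal userspace illustration of that pattern with the kernel types replaced by stubs; everything here is a stand-in for illustration, not the driver's actual probe path.

/* Userspace illustration of the container_of() trick behind to_mspusb_device(). */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct platform_device { int id; };     /* stub for the kernel struct */

struct mspusb_device {
        void *mab_regs;                 /* stand-ins for the __iomem fields */
        void *usbid_regs;
        void *usbhs_regs;
        struct platform_device dev;     /* embedded, not pointed to */
};

#define to_mspusb_device(x) container_of((x), struct mspusb_device, dev)

int main(void)
{
        struct mspusb_device usb = { .dev = { .id = 0 } };
        struct platform_device *pdev = &usb.dev;   /* what the core hands back */

        /* Recover the enclosing wrapper from the embedded platform_device. */
        printf("wrapper recovered: %s\n",
               to_mspusb_device(pdev) == &usb ? "yes" : "no");
        return 0;
}
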
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index ead6928fa6b..c104f1039a6 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -337,7 +337,7 @@ unsigned long get_wchan(struct task_struct *p);
337/* 337/*
338 * Return_address is a replacement for __builtin_return_address(count) 338 * Return_address is a replacement for __builtin_return_address(count)
339 * which on certain architectures cannot reasonably be implemented in GCC 339 * which on certain architectures cannot reasonably be implemented in GCC
340 * (MIPS, Alpha) or is unuseable with -fomit-frame-pointer (i386). 340 * (MIPS, Alpha) or is unusable with -fomit-frame-pointer (i386).
341 * Note that __builtin_return_address(x>=1) is forbidden because GCC 341 * Note that __builtin_return_address(x>=1) is forbidden because GCC
342 * aborts compilation on some CPUs. It's simply not possible to unwind 342 * aborts compilation on some CPUs. It's simply not possible to unwind
343 * some CPU's stackframes. 343 * some CPU's stackframes.
diff --git a/arch/mips/include/asm/sgi/ioc.h b/arch/mips/include/asm/sgi/ioc.h
index 57a971904cf..380347b648e 100644
--- a/arch/mips/include/asm/sgi/ioc.h
+++ b/arch/mips/include/asm/sgi/ioc.h
@@ -17,7 +17,7 @@
17#include <asm/sgi/pi1.h> 17#include <asm/sgi/pi1.h>
18 18
19/* 19/*
20 * All registers are 8-bit wide alligned on 32-bit boundary. Bad things 20 * All registers are 8-bit wide aligned on 32-bit boundary. Bad things
21 * happen if you try word access them. You have been warned. 21 * happen if you try word access them. You have been warned.
22 */ 22 */
23 23
diff --git a/arch/mips/include/asm/sibyte/sb1250_mac.h b/arch/mips/include/asm/sibyte/sb1250_mac.h
index 591b9061fd8..77f78728423 100644
--- a/arch/mips/include/asm/sibyte/sb1250_mac.h
+++ b/arch/mips/include/asm/sibyte/sb1250_mac.h
@@ -520,7 +520,7 @@
520#define G_MAC_RX_EOP_COUNTER(x) _SB_GETVALUE(x, S_MAC_RX_EOP_COUNTER, M_MAC_RX_EOP_COUNTER) 520#define G_MAC_RX_EOP_COUNTER(x) _SB_GETVALUE(x, S_MAC_RX_EOP_COUNTER, M_MAC_RX_EOP_COUNTER)
521 521
522/* 522/*
523 * MAC Recieve Address Filter Exact Match Registers (Table 9-21) 523 * MAC Receive Address Filter Exact Match Registers (Table 9-21)
524 * Registers: MAC_ADDR0_0 through MAC_ADDR7_0 524 * Registers: MAC_ADDR0_0 through MAC_ADDR7_0
525 * Registers: MAC_ADDR0_1 through MAC_ADDR7_1 525 * Registers: MAC_ADDR0_1 through MAC_ADDR7_1
526 * Registers: MAC_ADDR0_2 through MAC_ADDR7_2 526 * Registers: MAC_ADDR0_2 through MAC_ADDR7_2
@@ -538,7 +538,7 @@
538/* No bitfields */ 538/* No bitfields */
539 539
540/* 540/*
541 * MAC Recieve Address Filter Hash Match Registers (Table 9-22) 541 * MAC Receive Address Filter Hash Match Registers (Table 9-22)
542 * Registers: MAC_HASH0_0 through MAC_HASH7_0 542 * Registers: MAC_HASH0_0 through MAC_HASH7_0
543 * Registers: MAC_HASH0_1 through MAC_HASH7_1 543 * Registers: MAC_HASH0_1 through MAC_HASH7_1
544 * Registers: MAC_HASH0_2 through MAC_HASH7_2 544 * Registers: MAC_HASH0_2 through MAC_HASH7_2
diff --git a/arch/mips/include/asm/siginfo.h b/arch/mips/include/asm/siginfo.h
index 1ca64b4d33d..20ebeb875ee 100644
--- a/arch/mips/include/asm/siginfo.h
+++ b/arch/mips/include/asm/siginfo.h
@@ -101,7 +101,7 @@ typedef struct siginfo {
101 101
102/* 102/*
103 * si_code values 103 * si_code values
104 * Again these have been choosen to be IRIX compatible. 104 * Again these have been chosen to be IRIX compatible.
105 */ 105 */
106#undef SI_ASYNCIO 106#undef SI_ASYNCIO
107#undef SI_TIMER 107#undef SI_TIMER
diff --git a/arch/mips/include/asm/sn/klconfig.h b/arch/mips/include/asm/sn/klconfig.h
index 09e590daca1..fe02900b930 100644
--- a/arch/mips/include/asm/sn/klconfig.h
+++ b/arch/mips/include/asm/sn/klconfig.h
@@ -78,7 +78,7 @@ typedef s32 klconf_off_t;
78 */ 78 */
79#define MAX_SLOTS_PER_NODE (1 + 2 + 6 + 2) 79#define MAX_SLOTS_PER_NODE (1 + 2 + 6 + 2)
80 80
81/* XXX if each node is guranteed to have some memory */ 81/* XXX if each node is guaranteed to have some memory */
82 82
83#define MAX_PCI_DEVS 8 83#define MAX_PCI_DEVS 8
84 84
@@ -539,7 +539,7 @@ typedef struct klinfo_s { /* Generic info */
539#define KLSTRUCT_IOC3_TTY 24 539#define KLSTRUCT_IOC3_TTY 24
540 540
541/* Early Access IO proms are compatible 541/* Early Access IO proms are compatible
542 only with KLSTRUCT values upto 24. */ 542 only with KLSTRUCT values up to 24. */
543 543
544#define KLSTRUCT_FIBERCHANNEL 25 544#define KLSTRUCT_FIBERCHANNEL 25
545#define KLSTRUCT_MOD_SERIAL_NUM 26 545#define KLSTRUCT_MOD_SERIAL_NUM 26
diff --git a/arch/mips/include/asm/sn/sn0/hubio.h b/arch/mips/include/asm/sn/sn0/hubio.h
index 31c76c021bb..46286d8302a 100644
--- a/arch/mips/include/asm/sn/sn0/hubio.h
+++ b/arch/mips/include/asm/sn/sn0/hubio.h
@@ -622,7 +622,7 @@ typedef union h1_icrbb_u {
622 */ 622 */
623#define IIO_ICRB_PROC0 0 /* Source of request is Proc 0 */ 623#define IIO_ICRB_PROC0 0 /* Source of request is Proc 0 */
624#define IIO_ICRB_PROC1 1 /* Source of request is Proc 1 */ 624#define IIO_ICRB_PROC1 1 /* Source of request is Proc 1 */
625#define IIO_ICRB_GB_REQ 2 /* Source is Guranteed BW request */ 625#define IIO_ICRB_GB_REQ 2 /* Source is Guaranteed BW request */
626#define IIO_ICRB_IO_REQ 3 /* Source is Normal IO request */ 626#define IIO_ICRB_IO_REQ 3 /* Source is Normal IO request */
627 627
628/* 628/*
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index 396e402fbe2..ca61e846ab0 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -245,16 +245,16 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
245 __asm__ __volatile__( 245 __asm__ __volatile__(
246 " .set noreorder # arch_read_lock \n" 246 " .set noreorder # arch_read_lock \n"
247 "1: ll %1, %2 \n" 247 "1: ll %1, %2 \n"
248 " bltz %1, 2f \n" 248 " bltz %1, 3f \n"
249 " addu %1, 1 \n" 249 " addu %1, 1 \n"
250 " sc %1, %0 \n" 250 "2: sc %1, %0 \n"
251 " beqz %1, 1b \n" 251 " beqz %1, 1b \n"
252 " nop \n" 252 " nop \n"
253 " .subsection 2 \n" 253 " .subsection 2 \n"
254 "2: ll %1, %2 \n" 254 "3: ll %1, %2 \n"
255 " bltz %1, 2b \n" 255 " bltz %1, 3b \n"
256 " addu %1, 1 \n" 256 " addu %1, 1 \n"
257 " b 1b \n" 257 " b 2b \n"
258 " nop \n" 258 " nop \n"
259 " .previous \n" 259 " .previous \n"
260 " .set reorder \n" 260 " .set reorder \n"
@@ -324,16 +324,16 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
324 __asm__ __volatile__( 324 __asm__ __volatile__(
325 " .set noreorder # arch_write_lock \n" 325 " .set noreorder # arch_write_lock \n"
326 "1: ll %1, %2 \n" 326 "1: ll %1, %2 \n"
327 " bnez %1, 2f \n" 327 " bnez %1, 3f \n"
328 " lui %1, 0x8000 \n" 328 " lui %1, 0x8000 \n"
329 " sc %1, %0 \n" 329 "2: sc %1, %0 \n"
330 " beqz %1, 2f \n" 330 " beqz %1, 3f \n"
331 " nop \n" 331 " nop \n"
332 " .subsection 2 \n" 332 " .subsection 2 \n"
333 "2: ll %1, %2 \n" 333 "3: ll %1, %2 \n"
334 " bnez %1, 2b \n" 334 " bnez %1, 3b \n"
335 " lui %1, 0x8000 \n" 335 " lui %1, 0x8000 \n"
336 " b 1b \n" 336 " b 2b \n"
337 " nop \n" 337 " nop \n"
338 " .previous \n" 338 " .previous \n"
339 " .set reorder \n" 339 " .set reorder \n"
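
Structurally, the relabeling above makes the out-of-line wait loop branch back to the store-conditional at the new label 2 instead of to the initial ll at label 1, so the value it just reloaded and updated is the one the sc attempts to commit; the same change is applied to both the reader and the writer path. For readers not fluent in MIPS ll/sc, here is a hedged C-level sketch of the read-lock retry semantics this loop implements, using GCC __atomic builtins; it is only an illustration, not the kernel's arch_read_lock().

/* Sketch: wait out a writer (negative value), then try to commit
 * reader_count + 1; on a failed store, retry from the reload. */
#include <stdint.h>

static void sketch_read_lock(volatile int32_t *lock)
{
        int32_t old, new;

        do {
                old = __atomic_load_n(lock, __ATOMIC_RELAXED);
                while (old < 0)         /* writer holds the lock: reload */
                        old = __atomic_load_n(lock, __ATOMIC_RELAXED);
                new = old + 1;          /* one more reader */
        } while (!__atomic_compare_exchange_n(lock, &old, new, 0,
                                              __ATOMIC_ACQUIRE,
                                              __ATOMIC_RELAXED));
}

int main(void)
{
        static volatile int32_t lock;   /* 0 = free, <0 = writer, >0 = readers */

        sketch_read_lock(&lock);
        return lock == 1 ? 0 : 1;
}
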
diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
index 58730c5ce4b..b4ba2449444 100644
--- a/arch/mips/include/asm/stackframe.h
+++ b/arch/mips/include/asm/stackframe.h
@@ -346,7 +346,7 @@
346 * we can't dispatch it directly without trashing 346 * we can't dispatch it directly without trashing
347 * some registers, so we'll try to detect this unlikely 347 * some registers, so we'll try to detect this unlikely
348 * case and program a software interrupt in the VPE, 348 * case and program a software interrupt in the VPE,
349 * as would be done for a cross-VPE IPI. To accomodate 349 * as would be done for a cross-VPE IPI. To accommodate
350 * the handling of that case, we're doing a DVPE instead 350 * the handling of that case, we're doing a DVPE instead
351 * of just a DMT here to protect against other threads. 351 * of just a DMT here to protect against other threads.
352 * This is a lot of cruft to cover a tiny window. 352 * This is a lot of cruft to cover a tiny window.
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index d309556cacf..d71160de4d1 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -88,9 +88,11 @@ register struct thread_info *__current_thread_info __asm__("$28");
88#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR 88#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
89 89
90#ifdef CONFIG_DEBUG_STACK_USAGE 90#ifdef CONFIG_DEBUG_STACK_USAGE
91#define alloc_thread_info(tsk) kzalloc(THREAD_SIZE, GFP_KERNEL) 91#define alloc_thread_info_node(tsk, node) \
92 kzalloc_node(THREAD_SIZE, GFP_KERNEL, node)
92#else 93#else
93#define alloc_thread_info(tsk) kmalloc(THREAD_SIZE, GFP_KERNEL) 94#define alloc_thread_info_node(tsk, node) \
95 kmalloc_node(THREAD_SIZE, GFP_KERNEL, node)
94#endif 96#endif
95 97
96#define free_thread_info(info) kfree(info) 98#define free_thread_info(info) kfree(info)
diff --git a/arch/mips/include/asm/types.h b/arch/mips/include/asm/types.h
index 544a2854598..533812b6188 100644
--- a/arch/mips/include/asm/types.h
+++ b/arch/mips/include/asm/types.h
@@ -33,14 +33,6 @@ typedef unsigned short umode_t;
33#ifdef __KERNEL__ 33#ifdef __KERNEL__
34#ifndef __ASSEMBLY__ 34#ifndef __ASSEMBLY__
35 35
36#if (defined(CONFIG_HIGHMEM) && defined(CONFIG_64BIT_PHYS_ADDR)) \
37 || defined(CONFIG_64BIT)
38typedef u64 dma_addr_t;
39#else
40typedef u32 dma_addr_t;
41#endif
42typedef u64 dma64_addr_t;
43
44/* 36/*
45 * Don't use phys_t. You've been warned. 37 * Don't use phys_t. You've been warned.
46 */ 38 */
diff --git a/arch/mips/include/asm/unistd.h b/arch/mips/include/asm/unistd.h
index 550725b881d..fa2e37ea2be 100644
--- a/arch/mips/include/asm/unistd.h
+++ b/arch/mips/include/asm/unistd.h
@@ -359,16 +359,20 @@
359#define __NR_fanotify_init (__NR_Linux + 336) 359#define __NR_fanotify_init (__NR_Linux + 336)
360#define __NR_fanotify_mark (__NR_Linux + 337) 360#define __NR_fanotify_mark (__NR_Linux + 337)
361#define __NR_prlimit64 (__NR_Linux + 338) 361#define __NR_prlimit64 (__NR_Linux + 338)
362#define __NR_name_to_handle_at (__NR_Linux + 339)
363#define __NR_open_by_handle_at (__NR_Linux + 340)
364#define __NR_clock_adjtime (__NR_Linux + 341)
365#define __NR_syncfs (__NR_Linux + 342)
362 366
363/* 367/*
364 * Offset of the last Linux o32 flavoured syscall 368 * Offset of the last Linux o32 flavoured syscall
365 */ 369 */
366#define __NR_Linux_syscalls 338 370#define __NR_Linux_syscalls 342
367 371
368#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ 372#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
369 373
370#define __NR_O32_Linux 4000 374#define __NR_O32_Linux 4000
371#define __NR_O32_Linux_syscalls 338 375#define __NR_O32_Linux_syscalls 342
372 376
373#if _MIPS_SIM == _MIPS_SIM_ABI64 377#if _MIPS_SIM == _MIPS_SIM_ABI64
374 378
@@ -674,16 +678,20 @@
674#define __NR_fanotify_init (__NR_Linux + 295) 678#define __NR_fanotify_init (__NR_Linux + 295)
675#define __NR_fanotify_mark (__NR_Linux + 296) 679#define __NR_fanotify_mark (__NR_Linux + 296)
676#define __NR_prlimit64 (__NR_Linux + 297) 680#define __NR_prlimit64 (__NR_Linux + 297)
681#define __NR_name_to_handle_at (__NR_Linux + 298)
682#define __NR_open_by_handle_at (__NR_Linux + 299)
683#define __NR_clock_adjtime (__NR_Linux + 300)
684#define __NR_syncfs (__NR_Linux + 301)
677 685
678/* 686/*
679 * Offset of the last Linux 64-bit flavoured syscall 687 * Offset of the last Linux 64-bit flavoured syscall
680 */ 688 */
681#define __NR_Linux_syscalls 297 689#define __NR_Linux_syscalls 301
682 690
683#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ 691#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
684 692
685#define __NR_64_Linux 5000 693#define __NR_64_Linux 5000
686#define __NR_64_Linux_syscalls 297 694#define __NR_64_Linux_syscalls 301
687 695
688#if _MIPS_SIM == _MIPS_SIM_NABI32 696#if _MIPS_SIM == _MIPS_SIM_NABI32
689 697
@@ -994,16 +1002,20 @@
994#define __NR_fanotify_init (__NR_Linux + 300) 1002#define __NR_fanotify_init (__NR_Linux + 300)
995#define __NR_fanotify_mark (__NR_Linux + 301) 1003#define __NR_fanotify_mark (__NR_Linux + 301)
996#define __NR_prlimit64 (__NR_Linux + 302) 1004#define __NR_prlimit64 (__NR_Linux + 302)
1005#define __NR_name_to_handle_at (__NR_Linux + 303)
1006#define __NR_open_by_handle_at (__NR_Linux + 304)
1007#define __NR_clock_adjtime (__NR_Linux + 305)
1008#define __NR_syncfs (__NR_Linux + 306)
997 1009
998/* 1010/*
999 * Offset of the last N32 flavoured syscall 1011 * Offset of the last N32 flavoured syscall
1000 */ 1012 */
1001#define __NR_Linux_syscalls 302 1013#define __NR_Linux_syscalls 306
1002 1014
1003#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ 1015#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
1004 1016
1005#define __NR_N32_Linux 6000 1017#define __NR_N32_Linux 6000
1006#define __NR_N32_Linux_syscalls 302 1018#define __NR_N32_Linux_syscalls 306
1007 1019
1008#ifdef __KERNEL__ 1020#ifdef __KERNEL__
1009 1021
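
The four new calls are appended at the tail of each ABI's table, so their absolute numbers follow directly from the per-ABI bases (__NR_O32_Linux, __NR_64_Linux, __NR_N32_Linux) plus the offsets added in this hunk. A small userspace sketch that re-derives two of them from the numbers in the diff (values are taken from this hunk, not from installed kernel headers):

#include <stdio.h>

#define NR_O32_BASE 4000
#define NR_64_BASE  5000
#define NR_N32_BASE 6000

int main(void)
{
        printf("__NR_syncfs:            o32=%d 64=%d n32=%d\n",
               NR_O32_BASE + 342, NR_64_BASE + 301, NR_N32_BASE + 306);
        printf("__NR_name_to_handle_at: o32=%d 64=%d n32=%d\n",
               NR_O32_BASE + 339, NR_64_BASE + 298, NR_N32_BASE + 303);
        return 0;
}
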
diff --git a/arch/mips/include/asm/war.h b/arch/mips/include/asm/war.h
index 22361d5e3bf..fa133c1bc1f 100644
--- a/arch/mips/include/asm/war.h
+++ b/arch/mips/include/asm/war.h
@@ -227,7 +227,7 @@
227#endif 227#endif
228 228
229/* 229/*
230 * On the R10000 upto version 2.6 (not sure about 2.7) there is a bug that 230 * On the R10000 up to version 2.6 (not sure about 2.7) there is a bug that
231 * may cause ll / sc and lld / scd sequences to execute non-atomically. 231 * may cause ll / sc and lld / scd sequences to execute non-atomically.
232 */ 232 */
233#ifndef R10000_LLSC_WAR 233#ifndef R10000_LLSC_WAR
diff --git a/arch/mips/jazz/irq.c b/arch/mips/jazz/irq.c
index 35b3e2f0af0..260df475094 100644
--- a/arch/mips/jazz/irq.c
+++ b/arch/mips/jazz/irq.c
@@ -23,9 +23,9 @@
23 23
24static DEFINE_RAW_SPINLOCK(r4030_lock); 24static DEFINE_RAW_SPINLOCK(r4030_lock);
25 25
26static void enable_r4030_irq(unsigned int irq) 26static void enable_r4030_irq(struct irq_data *d)
27{ 27{
28 unsigned int mask = 1 << (irq - JAZZ_IRQ_START); 28 unsigned int mask = 1 << (d->irq - JAZZ_IRQ_START);
29 unsigned long flags; 29 unsigned long flags;
30 30
31 raw_spin_lock_irqsave(&r4030_lock, flags); 31 raw_spin_lock_irqsave(&r4030_lock, flags);
@@ -34,9 +34,9 @@ static void enable_r4030_irq(unsigned int irq)
34 raw_spin_unlock_irqrestore(&r4030_lock, flags); 34 raw_spin_unlock_irqrestore(&r4030_lock, flags);
35} 35}
36 36
37void disable_r4030_irq(unsigned int irq) 37void disable_r4030_irq(struct irq_data *d)
38{ 38{
39 unsigned int mask = ~(1 << (irq - JAZZ_IRQ_START)); 39 unsigned int mask = ~(1 << (d->irq - JAZZ_IRQ_START));
40 unsigned long flags; 40 unsigned long flags;
41 41
42 raw_spin_lock_irqsave(&r4030_lock, flags); 42 raw_spin_lock_irqsave(&r4030_lock, flags);
@@ -47,10 +47,8 @@ void disable_r4030_irq(unsigned int irq)
47 47
48static struct irq_chip r4030_irq_type = { 48static struct irq_chip r4030_irq_type = {
49 .name = "R4030", 49 .name = "R4030",
50 .ack = disable_r4030_irq, 50 .irq_mask = disable_r4030_irq,
51 .mask = disable_r4030_irq, 51 .irq_unmask = enable_r4030_irq,
52 .mask_ack = disable_r4030_irq,
53 .unmask = enable_r4030_irq,
54}; 52};
55 53
56void __init init_r4030_ints(void) 54void __init init_r4030_ints(void)
@@ -58,7 +56,7 @@ void __init init_r4030_ints(void)
58 int i; 56 int i;
59 57
60 for (i = JAZZ_IRQ_START; i <= JAZZ_IRQ_END; i++) 58 for (i = JAZZ_IRQ_START; i <= JAZZ_IRQ_END; i++)
61 set_irq_chip_and_handler(i, &r4030_irq_type, handle_level_irq); 59 irq_set_chip_and_handler(i, &r4030_irq_type, handle_level_irq);
62 60
63 r4030_write_reg16(JAZZ_IO_IRQ_ENABLE, 0); 61 r4030_write_reg16(JAZZ_IO_IRQ_ENABLE, 0);
64 r4030_read_reg16(JAZZ_IO_IRQ_SOURCE); /* clear pending IRQs */ 62 r4030_read_reg16(JAZZ_IO_IRQ_SOURCE); /* clear pending IRQs */
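
The jazz conversion above shows the new irq_chip callback shape: handlers receive a struct irq_data * and pull the Linux irq number from d->irq, the chip fills in .irq_mask/.irq_unmask instead of the old .mask/.unmask hooks, and irq_set_chip_and_handler() replaces set_irq_chip_and_handler(). A condensed kernel-context sketch of that shape follows; every demo_* name is invented, the shadow variable stands in for a real mask register, and this compiles only against a kernel tree of this vintage.

#include <linux/init.h>
#include <linux/irq.h>
#include <linux/bitops.h>

static unsigned long demo_mask_shadow;  /* stand-in for a hardware mask register */

static void demo_irq_mask(struct irq_data *d)
{
        /* d->irq replaces the old raw "unsigned int irq" argument */
        demo_mask_shadow |= BIT(d->irq);
}

static void demo_irq_unmask(struct irq_data *d)
{
        demo_mask_shadow &= ~BIT(d->irq);
}

static struct irq_chip demo_irq_chip = {
        .name       = "DEMO",
        .irq_mask   = demo_irq_mask,
        .irq_unmask = demo_irq_unmask,
};

static void __init demo_init_irqs(unsigned int first, unsigned int last)
{
        unsigned int i;

        for (i = first; i <= last; i++)
                irq_set_chip_and_handler(i, &demo_irq_chip, handle_level_irq);
}
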
diff --git a/arch/mips/jz4740/Makefile b/arch/mips/jz4740/Makefile
index a604eaeb6c0..a9dff332125 100644
--- a/arch/mips/jz4740/Makefile
+++ b/arch/mips/jz4740/Makefile
@@ -17,4 +17,4 @@ obj-$(CONFIG_JZ4740_QI_LB60) += board-qi_lb60.o
17 17
18obj-$(CONFIG_PM) += pm.o 18obj-$(CONFIG_PM) += pm.o
19 19
20EXTRA_CFLAGS += -Werror -Wall 20ccflags-y := -Werror -Wall
diff --git a/arch/mips/jz4740/board-qi_lb60.c b/arch/mips/jz4740/board-qi_lb60.c
index 2c0e107966a..c3b04be3fb2 100644
--- a/arch/mips/jz4740/board-qi_lb60.c
+++ b/arch/mips/jz4740/board-qi_lb60.c
@@ -23,6 +23,7 @@
23#include <linux/spi/spi_gpio.h> 23#include <linux/spi/spi_gpio.h>
24#include <linux/power_supply.h> 24#include <linux/power_supply.h>
25#include <linux/power/jz4740-battery.h> 25#include <linux/power/jz4740-battery.h>
26#include <linux/power/gpio-charger.h>
26 27
27#include <asm/mach-jz4740/jz4740_fb.h> 28#include <asm/mach-jz4740/jz4740_fb.h>
28#include <asm/mach-jz4740/jz4740_mmc.h> 29#include <asm/mach-jz4740/jz4740_mmc.h>
@@ -49,14 +50,14 @@ static bool is_avt2;
49 50
50/* NAND */ 51/* NAND */
51static struct nand_ecclayout qi_lb60_ecclayout_1gb = { 52static struct nand_ecclayout qi_lb60_ecclayout_1gb = {
52/* .eccbytes = 36, 53 .eccbytes = 36,
53 .eccpos = { 54 .eccpos = {
54 6, 7, 8, 9, 10, 11, 12, 13, 55 6, 7, 8, 9, 10, 11, 12, 13,
55 14, 15, 16, 17, 18, 19, 20, 21, 56 14, 15, 16, 17, 18, 19, 20, 21,
56 22, 23, 24, 25, 26, 27, 28, 29, 57 22, 23, 24, 25, 26, 27, 28, 29,
57 30, 31, 32, 33, 34, 35, 36, 37, 58 30, 31, 32, 33, 34, 35, 36, 37,
58 38, 39, 40, 41 59 38, 39, 40, 41
59 },*/ 60 },
60 .oobfree = { 61 .oobfree = {
61 { .offset = 2, .length = 4 }, 62 { .offset = 2, .length = 4 },
62 { .offset = 42, .length = 22 } 63 { .offset = 42, .length = 22 }
@@ -64,7 +65,7 @@ static struct nand_ecclayout qi_lb60_ecclayout_1gb = {
64}; 65};
65 66
66/* Early prototypes of the QI LB60 had only 1GB of NAND. 67/* Early prototypes of the QI LB60 had only 1GB of NAND.
67 * In order to support these devices aswell the partition and ecc layout is 68 * In order to support these devices as well the partition and ecc layout is
68 * initialized depending on the NAND size */ 69 * initialized depending on the NAND size */
69static struct mtd_partition qi_lb60_partitions_1gb[] = { 70static struct mtd_partition qi_lb60_partitions_1gb[] = {
70 { 71 {
@@ -85,7 +86,7 @@ static struct mtd_partition qi_lb60_partitions_1gb[] = {
85}; 86};
86 87
87static struct nand_ecclayout qi_lb60_ecclayout_2gb = { 88static struct nand_ecclayout qi_lb60_ecclayout_2gb = {
88/* .eccbytes = 72, 89 .eccbytes = 72,
89 .eccpos = { 90 .eccpos = {
90 12, 13, 14, 15, 16, 17, 18, 19, 91 12, 13, 14, 15, 16, 17, 18, 19,
91 20, 21, 22, 23, 24, 25, 26, 27, 92 20, 21, 22, 23, 24, 25, 26, 27,
@@ -96,7 +97,7 @@ static struct nand_ecclayout qi_lb60_ecclayout_2gb = {
96 60, 61, 62, 63, 64, 65, 66, 67, 97 60, 61, 62, 63, 64, 65, 66, 67,
97 68, 69, 70, 71, 72, 73, 74, 75, 98 68, 69, 70, 71, 72, 73, 74, 75,
98 76, 77, 78, 79, 80, 81, 82, 83 99 76, 77, 78, 79, 80, 81, 82, 83
99 },*/ 100 },
100 .oobfree = { 101 .oobfree = {
101 { .offset = 2, .length = 10 }, 102 { .offset = 2, .length = 10 },
102 { .offset = 84, .length = 44 }, 103 { .offset = 84, .length = 44 },
@@ -396,6 +397,28 @@ static struct platform_device qi_lb60_pwm_beeper = {
396 }, 397 },
397}; 398};
398 399
400/* charger */
401static char *qi_lb60_batteries[] = {
402 "battery",
403};
404
405static struct gpio_charger_platform_data qi_lb60_charger_pdata = {
406 .name = "usb",
407 .type = POWER_SUPPLY_TYPE_USB,
408 .gpio = JZ_GPIO_PORTD(28),
409 .gpio_active_low = 1,
410 .supplied_to = qi_lb60_batteries,
411 .num_supplicants = ARRAY_SIZE(qi_lb60_batteries),
412};
413
414static struct platform_device qi_lb60_charger_device = {
415 .name = "gpio-charger",
416 .dev = {
417 .platform_data = &qi_lb60_charger_pdata,
418 },
419};
420
421
399static struct platform_device *jz_platform_devices[] __initdata = { 422static struct platform_device *jz_platform_devices[] __initdata = {
400 &jz4740_udc_device, 423 &jz4740_udc_device,
401 &jz4740_mmc_device, 424 &jz4740_mmc_device,
@@ -410,12 +433,13 @@ static struct platform_device *jz_platform_devices[] __initdata = {
410 &jz4740_adc_device, 433 &jz4740_adc_device,
411 &qi_lb60_gpio_keys, 434 &qi_lb60_gpio_keys,
412 &qi_lb60_pwm_beeper, 435 &qi_lb60_pwm_beeper,
436 &qi_lb60_charger_device,
413}; 437};
414 438
415static void __init board_gpio_setup(void) 439static void __init board_gpio_setup(void)
416{ 440{
417 /* We only need to enable/disable pullup here for pins used in generic 441 /* We only need to enable/disable pullup here for pins used in generic
418 * drivers. Everything else is done by the drivers themselfs. */ 442 * drivers. Everything else is done by the drivers themselves. */
419 jz_gpio_disable_pullup(QI_LB60_GPIO_SD_VCC_EN_N); 443 jz_gpio_disable_pullup(QI_LB60_GPIO_SD_VCC_EN_N);
420 jz_gpio_disable_pullup(QI_LB60_GPIO_SD_CD); 444 jz_gpio_disable_pullup(QI_LB60_GPIO_SD_CD);
421} 445}
diff --git a/arch/mips/jz4740/gpio.c b/arch/mips/jz4740/gpio.c
index 88e6aeda5bf..73031f7fc82 100644
--- a/arch/mips/jz4740/gpio.c
+++ b/arch/mips/jz4740/gpio.c
@@ -86,7 +86,6 @@ struct jz_gpio_chip {
86 spinlock_t lock; 86 spinlock_t lock;
87 87
88 struct gpio_chip gpio_chip; 88 struct gpio_chip gpio_chip;
89 struct irq_chip irq_chip;
90 struct sys_device sysdev; 89 struct sys_device sysdev;
91}; 90};
92 91
@@ -102,9 +101,9 @@ static inline struct jz_gpio_chip *gpio_chip_to_jz_gpio_chip(struct gpio_chip *g
102 return container_of(gpio_chip, struct jz_gpio_chip, gpio_chip); 101 return container_of(gpio_chip, struct jz_gpio_chip, gpio_chip);
103} 102}
104 103
105static inline struct jz_gpio_chip *irq_to_jz_gpio_chip(unsigned int irq) 104static inline struct jz_gpio_chip *irq_to_jz_gpio_chip(struct irq_data *data)
106{ 105{
107 return get_irq_chip_data(irq); 106 return irq_data_get_irq_chip_data(data);
108} 107}
109 108
110static inline void jz_gpio_write_bit(unsigned int gpio, unsigned int reg) 109static inline void jz_gpio_write_bit(unsigned int gpio, unsigned int reg)
@@ -307,7 +306,7 @@ static void jz_gpio_irq_demux_handler(unsigned int irq, struct irq_desc *desc)
307 uint32_t flag; 306 uint32_t flag;
308 unsigned int gpio_irq; 307 unsigned int gpio_irq;
309 unsigned int gpio_bank; 308 unsigned int gpio_bank;
310 struct jz_gpio_chip *chip = get_irq_desc_data(desc); 309 struct jz_gpio_chip *chip = irq_desc_get_handler_data(desc);
311 310
312 gpio_bank = JZ4740_IRQ_GPIO0 - irq; 311 gpio_bank = JZ4740_IRQ_GPIO0 - irq;
313 312
@@ -325,62 +324,52 @@ static void jz_gpio_irq_demux_handler(unsigned int irq, struct irq_desc *desc)
325 generic_handle_irq(gpio_irq); 324 generic_handle_irq(gpio_irq);
326}; 325};
327 326
328static inline void jz_gpio_set_irq_bit(unsigned int irq, unsigned int reg) 327static inline void jz_gpio_set_irq_bit(struct irq_data *data, unsigned int reg)
329{ 328{
330 struct jz_gpio_chip *chip = irq_to_jz_gpio_chip(irq); 329 struct jz_gpio_chip *chip = irq_to_jz_gpio_chip(data);
331 writel(IRQ_TO_BIT(irq), chip->base + reg); 330 writel(IRQ_TO_BIT(data->irq), chip->base + reg);
332} 331}
333 332
334static void jz_gpio_irq_mask(unsigned int irq) 333static void jz_gpio_irq_mask(struct irq_data *data)
335{ 334{
336 jz_gpio_set_irq_bit(irq, JZ_REG_GPIO_MASK_SET); 335 jz_gpio_set_irq_bit(data, JZ_REG_GPIO_MASK_SET);
337}; 336};
338 337
339static void jz_gpio_irq_unmask(unsigned int irq) 338static void jz_gpio_irq_unmask(struct irq_data *data)
340{ 339{
341 struct jz_gpio_chip *chip = irq_to_jz_gpio_chip(irq); 340 struct jz_gpio_chip *chip = irq_to_jz_gpio_chip(data);
342 341
343 jz_gpio_check_trigger_both(chip, irq); 342 jz_gpio_check_trigger_both(chip, data->irq);
344 343
345 jz_gpio_set_irq_bit(irq, JZ_REG_GPIO_MASK_CLEAR); 344 jz_gpio_set_irq_bit(data, JZ_REG_GPIO_MASK_CLEAR);
346}; 345};
347 346
348/* TODO: Check if function is gpio */ 347/* TODO: Check if function is gpio */
349static unsigned int jz_gpio_irq_startup(unsigned int irq) 348static unsigned int jz_gpio_irq_startup(struct irq_data *data)
350{ 349{
351 struct irq_desc *desc = irq_to_desc(irq); 350 jz_gpio_set_irq_bit(data, JZ_REG_GPIO_SELECT_SET);
352 351 jz_gpio_irq_unmask(data);
353 jz_gpio_set_irq_bit(irq, JZ_REG_GPIO_SELECT_SET);
354
355 desc->status &= ~IRQ_MASKED;
356 jz_gpio_irq_unmask(irq);
357
358 return 0; 352 return 0;
359} 353}
360 354
361static void jz_gpio_irq_shutdown(unsigned int irq) 355static void jz_gpio_irq_shutdown(struct irq_data *data)
362{ 356{
363 struct irq_desc *desc = irq_to_desc(irq); 357 jz_gpio_irq_mask(data);
364
365 jz_gpio_irq_mask(irq);
366 desc->status |= IRQ_MASKED;
367 358
368 /* Set direction to input */ 359 /* Set direction to input */
369 jz_gpio_set_irq_bit(irq, JZ_REG_GPIO_DIRECTION_CLEAR); 360 jz_gpio_set_irq_bit(data, JZ_REG_GPIO_DIRECTION_CLEAR);
370 jz_gpio_set_irq_bit(irq, JZ_REG_GPIO_SELECT_CLEAR); 361 jz_gpio_set_irq_bit(data, JZ_REG_GPIO_SELECT_CLEAR);
371} 362}
372 363
373static void jz_gpio_irq_ack(unsigned int irq) 364static void jz_gpio_irq_ack(struct irq_data *data)
374{ 365{
375 jz_gpio_set_irq_bit(irq, JZ_REG_GPIO_FLAG_CLEAR); 366 jz_gpio_set_irq_bit(data, JZ_REG_GPIO_FLAG_CLEAR);
376}; 367};
377 368
378static int jz_gpio_irq_set_type(unsigned int irq, unsigned int flow_type) 369static int jz_gpio_irq_set_type(struct irq_data *data, unsigned int flow_type)
379{ 370{
380 struct jz_gpio_chip *chip = irq_to_jz_gpio_chip(irq); 371 struct jz_gpio_chip *chip = irq_to_jz_gpio_chip(data);
381 struct irq_desc *desc = irq_to_desc(irq); 372 unsigned int irq = data->irq;
382
383 jz_gpio_irq_mask(irq);
384 373
385 if (flow_type == IRQ_TYPE_EDGE_BOTH) { 374 if (flow_type == IRQ_TYPE_EDGE_BOTH) {
386 uint32_t value = readl(chip->base + JZ_REG_GPIO_PIN); 375 uint32_t value = readl(chip->base + JZ_REG_GPIO_PIN);
@@ -395,45 +384,54 @@ static int jz_gpio_irq_set_type(unsigned int irq, unsigned int flow_type)
395 384
396 switch (flow_type) { 385 switch (flow_type) {
397 case IRQ_TYPE_EDGE_RISING: 386 case IRQ_TYPE_EDGE_RISING:
398 jz_gpio_set_irq_bit(irq, JZ_REG_GPIO_DIRECTION_SET); 387 jz_gpio_set_irq_bit(data, JZ_REG_GPIO_DIRECTION_SET);
399 jz_gpio_set_irq_bit(irq, JZ_REG_GPIO_TRIGGER_SET); 388 jz_gpio_set_irq_bit(data, JZ_REG_GPIO_TRIGGER_SET);
400 break; 389 break;
401 case IRQ_TYPE_EDGE_FALLING: 390 case IRQ_TYPE_EDGE_FALLING:
402 jz_gpio_set_irq_bit(irq, JZ_REG_GPIO_DIRECTION_CLEAR); 391 jz_gpio_set_irq_bit(data, JZ_REG_GPIO_DIRECTION_CLEAR);
403 jz_gpio_set_irq_bit(irq, JZ_REG_GPIO_TRIGGER_SET); 392 jz_gpio_set_irq_bit(data, JZ_REG_GPIO_TRIGGER_SET);
404 break; 393 break;
405 case IRQ_TYPE_LEVEL_HIGH: 394 case IRQ_TYPE_LEVEL_HIGH:
406 jz_gpio_set_irq_bit(irq, JZ_REG_GPIO_DIRECTION_SET); 395 jz_gpio_set_irq_bit(data, JZ_REG_GPIO_DIRECTION_SET);
407 jz_gpio_set_irq_bit(irq, JZ_REG_GPIO_TRIGGER_CLEAR); 396 jz_gpio_set_irq_bit(data, JZ_REG_GPIO_TRIGGER_CLEAR);
408 break; 397 break;
409 case IRQ_TYPE_LEVEL_LOW: 398 case IRQ_TYPE_LEVEL_LOW:
410 jz_gpio_set_irq_bit(irq, JZ_REG_GPIO_DIRECTION_CLEAR); 399 jz_gpio_set_irq_bit(data, JZ_REG_GPIO_DIRECTION_CLEAR);
411 jz_gpio_set_irq_bit(irq, JZ_REG_GPIO_TRIGGER_CLEAR); 400 jz_gpio_set_irq_bit(data, JZ_REG_GPIO_TRIGGER_CLEAR);
412 break; 401 break;
413 default: 402 default:
414 return -EINVAL; 403 return -EINVAL;
415 } 404 }
416 405
417 if (!(desc->status & IRQ_MASKED))
418 jz_gpio_irq_unmask(irq);
419
420 return 0; 406 return 0;
421} 407}
422 408
423static int jz_gpio_irq_set_wake(unsigned int irq, unsigned int on) 409static int jz_gpio_irq_set_wake(struct irq_data *data, unsigned int on)
424{ 410{
425 struct jz_gpio_chip *chip = irq_to_jz_gpio_chip(irq); 411 struct jz_gpio_chip *chip = irq_to_jz_gpio_chip(data);
426 spin_lock(&chip->lock); 412 spin_lock(&chip->lock);
427 if (on) 413 if (on)
428 chip->wakeup |= IRQ_TO_BIT(irq); 414 chip->wakeup |= IRQ_TO_BIT(data->irq);
429 else 415 else
430 chip->wakeup &= ~IRQ_TO_BIT(irq); 416 chip->wakeup &= ~IRQ_TO_BIT(data->irq);
431 spin_unlock(&chip->lock); 417 spin_unlock(&chip->lock);
432 418
433 set_irq_wake(chip->irq, on); 419 irq_set_irq_wake(chip->irq, on);
434 return 0; 420 return 0;
435} 421}
436 422
423static struct irq_chip jz_gpio_irq_chip = {
424 .name = "GPIO",
425 .irq_mask = jz_gpio_irq_mask,
426 .irq_unmask = jz_gpio_irq_unmask,
427 .irq_ack = jz_gpio_irq_ack,
428 .irq_startup = jz_gpio_irq_startup,
429 .irq_shutdown = jz_gpio_irq_shutdown,
430 .irq_set_type = jz_gpio_irq_set_type,
431 .irq_set_wake = jz_gpio_irq_set_wake,
432 .flags = IRQCHIP_SET_TYPE_MASKED,
433};
434
437/* 435/*
438 * This lock class tells lockdep that GPIO irqs are in a different 436 * This lock class tells lockdep that GPIO irqs are in a different
439 * category than their parents, so it won't report false recursion. 437 * category than their parents, so it won't report false recursion.
@@ -452,16 +450,6 @@ static struct lock_class_key gpio_lock_class;
452 .base = JZ4740_GPIO_BASE_ ## _bank, \ 450 .base = JZ4740_GPIO_BASE_ ## _bank, \
453 .ngpio = JZ4740_GPIO_NUM_ ## _bank, \ 451 .ngpio = JZ4740_GPIO_NUM_ ## _bank, \
454 }, \ 452 }, \
455 .irq_chip = { \
456 .name = "GPIO Bank " # _bank, \
457 .mask = jz_gpio_irq_mask, \
458 .unmask = jz_gpio_irq_unmask, \
459 .ack = jz_gpio_irq_ack, \
460 .startup = jz_gpio_irq_startup, \
461 .shutdown = jz_gpio_irq_shutdown, \
462 .set_type = jz_gpio_irq_set_type, \
463 .set_wake = jz_gpio_irq_set_wake, \
464 }, \
465} 453}
466 454
467static struct jz_gpio_chip jz4740_gpio_chips[] = { 455static struct jz_gpio_chip jz4740_gpio_chips[] = {
@@ -522,13 +510,14 @@ static int jz4740_gpio_chip_init(struct jz_gpio_chip *chip, unsigned int id)
522 gpiochip_add(&chip->gpio_chip); 510 gpiochip_add(&chip->gpio_chip);
523 511
524 chip->irq = JZ4740_IRQ_INTC_GPIO(id); 512 chip->irq = JZ4740_IRQ_INTC_GPIO(id);
525 set_irq_data(chip->irq, chip); 513 irq_set_handler_data(chip->irq, chip);
526 set_irq_chained_handler(chip->irq, jz_gpio_irq_demux_handler); 514 irq_set_chained_handler(chip->irq, jz_gpio_irq_demux_handler);
527 515
528 for (irq = chip->irq_base; irq < chip->irq_base + chip->gpio_chip.ngpio; ++irq) { 516 for (irq = chip->irq_base; irq < chip->irq_base + chip->gpio_chip.ngpio; ++irq) {
529 lockdep_set_class(&irq_desc[irq].lock, &gpio_lock_class); 517 irq_set_lockdep_class(irq, &gpio_lock_class);
530 set_irq_chip_data(irq, chip); 518 irq_set_chip_data(irq, chip);
531 set_irq_chip_and_handler(irq, &chip->irq_chip, handle_level_irq); 519 irq_set_chip_and_handler(irq, &jz_gpio_irq_chip,
520 handle_level_irq);
532 } 521 }
533 522
534 return 0; 523 return 0;
diff --git a/arch/mips/jz4740/irq.c b/arch/mips/jz4740/irq.c
index 7d33ff83580..d82c0c430e0 100644
--- a/arch/mips/jz4740/irq.c
+++ b/arch/mips/jz4740/irq.c
@@ -43,32 +43,37 @@ static uint32_t jz_intc_saved;
43 43
44#define IRQ_BIT(x) BIT((x) - JZ4740_IRQ_BASE) 44#define IRQ_BIT(x) BIT((x) - JZ4740_IRQ_BASE)
45 45
46static void intc_irq_unmask(unsigned int irq) 46static inline unsigned long intc_irq_bit(struct irq_data *data)
47{ 47{
48 writel(IRQ_BIT(irq), jz_intc_base + JZ_REG_INTC_CLEAR_MASK); 48 return (unsigned long)irq_data_get_irq_chip_data(data);
49} 49}
50 50
51static void intc_irq_mask(unsigned int irq) 51static void intc_irq_unmask(struct irq_data *data)
52{ 52{
53 writel(IRQ_BIT(irq), jz_intc_base + JZ_REG_INTC_SET_MASK); 53 writel(intc_irq_bit(data), jz_intc_base + JZ_REG_INTC_CLEAR_MASK);
54} 54}
55 55
56static int intc_irq_set_wake(unsigned int irq, unsigned int on) 56static void intc_irq_mask(struct irq_data *data)
57{
58 writel(intc_irq_bit(data), jz_intc_base + JZ_REG_INTC_SET_MASK);
59}
60
61static int intc_irq_set_wake(struct irq_data *data, unsigned int on)
57{ 62{
58 if (on) 63 if (on)
59 jz_intc_wakeup |= IRQ_BIT(irq); 64 jz_intc_wakeup |= intc_irq_bit(data);
60 else 65 else
61 jz_intc_wakeup &= ~IRQ_BIT(irq); 66 jz_intc_wakeup &= ~intc_irq_bit(data);
62 67
63 return 0; 68 return 0;
64} 69}
65 70
66static struct irq_chip intc_irq_type = { 71static struct irq_chip intc_irq_type = {
67 .name = "INTC", 72 .name = "INTC",
68 .mask = intc_irq_mask, 73 .irq_mask = intc_irq_mask,
69 .mask_ack = intc_irq_mask, 74 .irq_mask_ack = intc_irq_mask,
70 .unmask = intc_irq_unmask, 75 .irq_unmask = intc_irq_unmask,
71 .set_wake = intc_irq_set_wake, 76 .irq_set_wake = intc_irq_set_wake,
72}; 77};
73 78
74static irqreturn_t jz4740_cascade(int irq, void *data) 79static irqreturn_t jz4740_cascade(int irq, void *data)
@@ -95,9 +100,12 @@ void __init arch_init_irq(void)
95 100
96 jz_intc_base = ioremap(JZ4740_INTC_BASE_ADDR, 0x14); 101 jz_intc_base = ioremap(JZ4740_INTC_BASE_ADDR, 0x14);
97 102
103 /* Mask all irqs */
104 writel(0xffffffff, jz_intc_base + JZ_REG_INTC_SET_MASK);
105
98 for (i = JZ4740_IRQ_BASE; i < JZ4740_IRQ_BASE + 32; i++) { 106 for (i = JZ4740_IRQ_BASE; i < JZ4740_IRQ_BASE + 32; i++) {
99 intc_irq_mask(i); 107 irq_set_chip_data(i, (void *)IRQ_BIT(i));
100 set_irq_chip_and_handler(i, &intc_irq_type, handle_level_irq); 108 irq_set_chip_and_handler(i, &intc_irq_type, handle_level_irq);
101 } 109 }
102 110
103 setup_irq(2, &jz4740_cascade_action); 111 setup_irq(2, &jz4740_cascade_action);
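
The irq.c hunk above stores each line's IRQ_BIT() value as chip data at setup time, so the irq_data callbacks can fetch it with irq_data_get_irq_chip_data() instead of recomputing it from the IRQ number. A hedged sketch of that idiom with invented register offsets (REG_SET_MASK, REG_CLEAR_MASK and intc_base are placeholders, not JZ4740 registers):

#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>

static void __iomem *intc_base;
#define REG_SET_MASK    0x08
#define REG_CLEAR_MASK  0x0c

static void sketch_irq_mask(struct irq_data *d)
{
        writel((u32)(unsigned long)irq_data_get_irq_chip_data(d),
               intc_base + REG_SET_MASK);
}

static void sketch_irq_unmask(struct irq_data *d)
{
        writel((u32)(unsigned long)irq_data_get_irq_chip_data(d),
               intc_base + REG_CLEAR_MASK);
}

static struct irq_chip sketch_intc_chip = {
        .name           = "SKETCH-INTC",
        .irq_mask       = sketch_irq_mask,
        .irq_unmask     = sketch_irq_unmask,
};

static void __init sketch_intc_setup(unsigned int irq_base)
{
        int i;

        for (i = 0; i < 32; i++) {
                /* the bit that masks this line doubles as its chip data */
                irq_set_chip_data(irq_base + i,
                                  (void *)(unsigned long)BIT(i));
                irq_set_chip_and_handler(irq_base + i, &sketch_intc_chip,
                                         handle_level_irq);
        }
}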
diff --git a/arch/mips/jz4740/platform.c b/arch/mips/jz4740/platform.c
index 1cc9e544d16..10929e2bc6d 100644
--- a/arch/mips/jz4740/platform.c
+++ b/arch/mips/jz4740/platform.c
@@ -289,3 +289,19 @@ void jz4740_serial_device_register(void)
289 289
290 platform_device_register(&jz4740_uart_device); 290 platform_device_register(&jz4740_uart_device);
291} 291}
292
293/* Watchdog */
294static struct resource jz4740_wdt_resources[] = {
295 {
296 .start = JZ4740_WDT_BASE_ADDR,
297 .end = JZ4740_WDT_BASE_ADDR + 0x10 - 1,
298 .flags = IORESOURCE_MEM,
299 },
300};
301
302struct platform_device jz4740_wdt_device = {
303 .name = "jz4740-wdt",
304 .id = -1,
305 .num_resources = ARRAY_SIZE(jz4740_wdt_resources),
306 .resource = jz4740_wdt_resources,
307};
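
platform.c above registers the watchdog as a plain platform device with one memory resource. For context, a hedged sketch of the driver side that would bind to such a device by name; "sketch-wdt" and the probe body are illustrative and not the actual jz4740-wdt driver:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int __devinit sketch_wdt_probe(struct platform_device *pdev)
{
        struct resource *res;
        void __iomem *base;

        /* pick up the IORESOURCE_MEM entry the board code registered */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
                return -ENOENT;

        base = ioremap(res->start, resource_size(res));
        if (!base)
                return -ENOMEM;

        platform_set_drvdata(pdev, base);
        return 0;
}

static int __devexit sketch_wdt_remove(struct platform_device *pdev)
{
        iounmap(platform_get_drvdata(pdev));
        return 0;
}

static struct platform_driver sketch_wdt_driver = {
        .probe          = sketch_wdt_probe,
        .remove         = __devexit_p(sketch_wdt_remove),
        .driver         = {
                .name   = "sketch-wdt",   /* must match the device name */
                .owner  = THIS_MODULE,
        },
};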
diff --git a/arch/mips/kernel/cpu-bugs64.c b/arch/mips/kernel/cpu-bugs64.c
index b8bb8ba6086..f305ca14351 100644
--- a/arch/mips/kernel/cpu-bugs64.c
+++ b/arch/mips/kernel/cpu-bugs64.c
@@ -73,7 +73,7 @@ static inline void mult_sh_align_mod(long *v1, long *v2, long *w,
73 : "0" (5), "1" (8), "2" (5)); 73 : "0" (5), "1" (8), "2" (5));
74 align_mod(align, mod); 74 align_mod(align, mod);
75 /* 75 /*
76 * The trailing nop is needed to fullfill the two-instruction 76 * The trailing nop is needed to fulfill the two-instruction
77 * requirement between reading hi/lo and starting a mult/div. 77 * requirement between reading hi/lo and starting a mult/div.
78 * Leaving it out may cause gas to insert a nop itself, breaking 78 * Leaving it out may cause gas to insert a nop itself, breaking
77 * requirement between reading hi/lo and starting a mult/div. 77 * requirement between reading hi/lo and starting a mult/div.
79 * the desired alignment of the next chunk. 79 * the desired alignment of the next chunk.
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index 5a84a1f1123..94ca2b018af 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -17,29 +17,13 @@
17#include <asm/cacheflush.h> 17#include <asm/cacheflush.h>
18#include <asm/uasm.h> 18#include <asm/uasm.h>
19 19
20/* 20#include <asm-generic/sections.h>
21 * If the Instruction Pointer is in module space (0xc0000000), return true;
22 * otherwise, it is in kernel space (0x80000000), return false.
23 *
24 * FIXME: This will not work when the kernel space and module space are the
25 * same. If they are the same, we need to modify scripts/recordmcount.pl,
26 * ftrace_make_nop/call() and the other related parts to ensure the
27 * enabling/disabling of the calling site to _mcount is right for both kernel
28 * and module.
29 */
30
31static inline int in_module(unsigned long ip)
32{
33 return ip & 0x40000000;
34}
35 21
36#ifdef CONFIG_DYNAMIC_FTRACE 22#ifdef CONFIG_DYNAMIC_FTRACE
37 23
38#define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */ 24#define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */
39#define ADDR_MASK 0x03ffffff /* op_code|addr : 31...26|25 ....0 */ 25#define ADDR_MASK 0x03ffffff /* op_code|addr : 31...26|25 ....0 */
40 26
41#define INSN_B_1F_4 0x10000004 /* b 1f; offset = 4 */
42#define INSN_B_1F_5 0x10000005 /* b 1f; offset = 5 */
43#define INSN_NOP 0x00000000 /* nop */ 27#define INSN_NOP 0x00000000 /* nop */
44#define INSN_JAL(addr) \ 28#define INSN_JAL(addr) \
45 ((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK))) 29 ((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))
@@ -69,6 +53,20 @@ static inline void ftrace_dyn_arch_init_insns(void)
69#endif 53#endif
70} 54}
71 55
56/*
57 * Check if the address is in kernel space
58 *
59 * Clone core_kernel_text() from kernel/extable.c, but doesn't call
60 * init_kernel_text() for Ftrace doesn't trace functions in init sections.
61 */
62static inline int in_kernel_space(unsigned long ip)
63{
64 if (ip >= (unsigned long)_stext &&
65 ip <= (unsigned long)_etext)
66 return 1;
67 return 0;
68}
69
72static int ftrace_modify_code(unsigned long ip, unsigned int new_code) 70static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
73{ 71{
74 int faulted; 72 int faulted;
@@ -84,6 +82,42 @@ static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
84 return 0; 82 return 0;
85} 83}
86 84
85/*
86 * The details about the calling site of mcount on MIPS
87 *
88 * 1. For kernel:
89 *
90 * move at, ra
91 * jal _mcount --> nop
92 *
93 * 2. For modules:
94 *
95 * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT
96 *
97 * lui v1, hi_16bit_of_mcount --> b 1f (0x10000005)
98 * addiu v1, v1, low_16bit_of_mcount
99 * move at, ra
100 * move $12, ra_address
101 * jalr v1
102 * sub sp, sp, 8
103 * 1: offset = 5 instructions
104 * 2.2 For the Other situations
105 *
106 * lui v1, hi_16bit_of_mcount --> b 1f (0x10000004)
107 * addiu v1, v1, low_16bit_of_mcount
108 * move at, ra
109 * jalr v1
110 * nop | move $12, ra_address | sub sp, sp, 8
111 * 1: offset = 4 instructions
112 */
113
114#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
115#define MCOUNT_OFFSET_INSNS 5
116#else
117#define MCOUNT_OFFSET_INSNS 4
118#endif
119#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)
120
87int ftrace_make_nop(struct module *mod, 121int ftrace_make_nop(struct module *mod,
88 struct dyn_ftrace *rec, unsigned long addr) 122 struct dyn_ftrace *rec, unsigned long addr)
89{ 123{
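
The comment block added above encodes the module-case patch as a short forward branch: 0x10000000 ORed with the number of instructions to skip. A small user-space check of that encoding, assuming the standard MIPS I-type branch layout (a "b" is beq $0, $0 with a signed 16-bit offset counted in instructions from the delay slot); the addresses and values here are only for illustration:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t encode_b(int16_t insn_offset)
{
        /* beq $0, $0, offset: opcode 000100 << 26 = 0x10000000, rs = rt = 0 */
        return 0x10000000u | (uint16_t)insn_offset;
}

int main(void)
{
        assert(encode_b(4) == 0x10000004u);     /* the 4-instruction skip */
        assert(encode_b(5) == 0x10000005u);     /* the 5-instruction skip */
        printf("b 1f encodings match INSN_B_1F\n");
        return 0;
}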
@@ -91,39 +125,11 @@ int ftrace_make_nop(struct module *mod,
91 unsigned long ip = rec->ip; 125 unsigned long ip = rec->ip;
92 126
93 /* 127 /*
94 * We have compiled module with -mlong-calls, but compiled the kernel 128 * If ip is in kernel space, no long call, otherwise, long call is
95 * without it, we need to cope with them respectively. 129 * needed.
96 */ 130 */
97 if (in_module(ip)) { 131 new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F;
98#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT) 132
99 /*
100 * lui v1, hi_16bit_of_mcount --> b 1f (0x10000005)
101 * addiu v1, v1, low_16bit_of_mcount
102 * move at, ra
103 * move $12, ra_address
104 * jalr v1
105 * sub sp, sp, 8
106 * 1: offset = 5 instructions
107 */
108 new = INSN_B_1F_5;
109#else
110 /*
111 * lui v1, hi_16bit_of_mcount --> b 1f (0x10000004)
112 * addiu v1, v1, low_16bit_of_mcount
113 * move at, ra
114 * jalr v1
115 * nop | move $12, ra_address | sub sp, sp, 8
116 * 1: offset = 4 instructions
117 */
118 new = INSN_B_1F_4;
119#endif
120 } else {
121 /*
122 * move at, ra
123 * jal _mcount --> nop
124 */
125 new = INSN_NOP;
126 }
127 return ftrace_modify_code(ip, new); 133 return ftrace_modify_code(ip, new);
128} 134}
129 135
@@ -132,8 +138,8 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
132 unsigned int new; 138 unsigned int new;
133 unsigned long ip = rec->ip; 139 unsigned long ip = rec->ip;
134 140
135 /* ip, module: 0xc0000000, kernel: 0x80000000 */ 141 new = in_kernel_space(ip) ? insn_jal_ftrace_caller :
136 new = in_module(ip) ? insn_lui_v1_hi16_mcount : insn_jal_ftrace_caller; 142 insn_lui_v1_hi16_mcount;
137 143
138 return ftrace_modify_code(ip, new); 144 return ftrace_modify_code(ip, new);
139} 145}
@@ -190,29 +196,25 @@ int ftrace_disable_ftrace_graph_caller(void)
190#define S_R_SP (0xafb0 << 16) /* s{d,w} R, offset(sp) */ 196#define S_R_SP (0xafb0 << 16) /* s{d,w} R, offset(sp) */
191#define OFFSET_MASK 0xffff /* stack offset range: 0 ~ PT_SIZE */ 197#define OFFSET_MASK 0xffff /* stack offset range: 0 ~ PT_SIZE */
192 198
193unsigned long ftrace_get_parent_addr(unsigned long self_addr, 199unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
194 unsigned long parent, 200 old_parent_ra, unsigned long parent_ra_addr, unsigned long fp)
195 unsigned long parent_addr,
196 unsigned long fp)
197{ 201{
198 unsigned long sp, ip, ra; 202 unsigned long sp, ip, tmp;
199 unsigned int code; 203 unsigned int code;
200 int faulted; 204 int faulted;
201 205
202 /* 206 /*
203 * For module, move the ip from calling site of mcount to the 207 * For module, move the ip from the return address after the
204 * instruction "lui v1, hi_16bit_of_mcount"(offset is 20), but for 208 * instruction "lui v1, hi_16bit_of_mcount"(offset is 24), but for
205 * kernel, move to the instruction "move ra, at"(offset is 12) 209 * kernel, move after the instruction "move ra, at"(offset is 16)
206 */ 210 */
207 ip = self_addr - (in_module(self_addr) ? 20 : 12); 211 ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24);
208 212
209 /* 213 /*
210 * search the text until finding the non-store instruction or "s{d,w} 214 * search the text until finding the non-store instruction or "s{d,w}
211 * ra, offset(sp)" instruction 215 * ra, offset(sp)" instruction
212 */ 216 */
213 do { 217 do {
214 ip -= 4;
215
216 /* get the code at "ip": code = *(unsigned int *)ip; */ 218 /* get the code at "ip": code = *(unsigned int *)ip; */
217 safe_load_code(code, ip, faulted); 219 safe_load_code(code, ip, faulted);
218 220
@@ -224,18 +226,20 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr,
224 * store the ra on the stack 226 * store the ra on the stack
225 */ 227 */
226 if ((code & S_R_SP) != S_R_SP) 228 if ((code & S_R_SP) != S_R_SP)
227 return parent_addr; 229 return parent_ra_addr;
228 230
229 } while (((code & S_RA_SP) != S_RA_SP)); 231 /* Move to the next instruction */
232 ip -= 4;
233 } while ((code & S_RA_SP) != S_RA_SP);
230 234
231 sp = fp + (code & OFFSET_MASK); 235 sp = fp + (code & OFFSET_MASK);
232 236
233 /* ra = *(unsigned long *)sp; */ 237 /* tmp = *(unsigned long *)sp; */
234 safe_load_stack(ra, sp, faulted); 238 safe_load_stack(tmp, sp, faulted);
235 if (unlikely(faulted)) 239 if (unlikely(faulted))
236 return 0; 240 return 0;
237 241
238 if (ra == parent) 242 if (tmp == old_parent_ra)
239 return sp; 243 return sp;
240 return 0; 244 return 0;
241} 245}
@@ -246,21 +250,21 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr,
246 * Hook the return address and push it in the stack of return addrs 250 * Hook the return address and push it in the stack of return addrs
247 * in current thread info. 251 * in current thread info.
248 */ 252 */
249void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, 253void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
250 unsigned long fp) 254 unsigned long fp)
251{ 255{
252 unsigned long old; 256 unsigned long old_parent_ra;
253 struct ftrace_graph_ent trace; 257 struct ftrace_graph_ent trace;
254 unsigned long return_hooker = (unsigned long) 258 unsigned long return_hooker = (unsigned long)
255 &return_to_handler; 259 &return_to_handler;
256 int faulted; 260 int faulted, insns;
257 261
258 if (unlikely(atomic_read(&current->tracing_graph_pause))) 262 if (unlikely(atomic_read(&current->tracing_graph_pause)))
259 return; 263 return;
260 264
261 /* 265 /*
262 * "parent" is the stack address saved the return address of the caller 266 * "parent_ra_addr" is the stack address saved the return address of
263 * of _mcount. 267 * the caller of _mcount.
264 * 268 *
265 * if the gcc < 4.5, a leaf function does not save the return address 269 * if the gcc < 4.5, a leaf function does not save the return address
266 * in the stack address, so, we "emulate" one in _mcount's stack space, 270 * in the stack address, so, we "emulate" one in _mcount's stack space,
@@ -275,37 +279,44 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
275 * do it in ftrace_graph_caller of mcount.S. 279 * do it in ftrace_graph_caller of mcount.S.
276 */ 280 */
277 281
278 /* old = *parent; */ 282 /* old_parent_ra = *parent_ra_addr; */
279 safe_load_stack(old, parent, faulted); 283 safe_load_stack(old_parent_ra, parent_ra_addr, faulted);
280 if (unlikely(faulted)) 284 if (unlikely(faulted))
281 goto out; 285 goto out;
282#ifndef KBUILD_MCOUNT_RA_ADDRESS 286#ifndef KBUILD_MCOUNT_RA_ADDRESS
283 parent = (unsigned long *)ftrace_get_parent_addr(self_addr, old, 287 parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra,
284 (unsigned long)parent, fp); 288 old_parent_ra, (unsigned long)parent_ra_addr, fp);
285 /* 289 /*
286 * If fails when getting the stack address of the non-leaf function's 290 * If fails when getting the stack address of the non-leaf function's
287 * ra, stop function graph tracer and return 291 * ra, stop function graph tracer and return
288 */ 292 */
289 if (parent == 0) 293 if (parent_ra_addr == 0)
290 goto out; 294 goto out;
291#endif 295#endif
292 /* *parent = return_hooker; */ 296 /* *parent_ra_addr = return_hooker; */
293 safe_store_stack(return_hooker, parent, faulted); 297 safe_store_stack(return_hooker, parent_ra_addr, faulted);
294 if (unlikely(faulted)) 298 if (unlikely(faulted))
295 goto out; 299 goto out;
296 300
297 if (ftrace_push_return_trace(old, self_addr, &trace.depth, fp) == 301 if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp)
298 -EBUSY) { 302 == -EBUSY) {
299 *parent = old; 303 *parent_ra_addr = old_parent_ra;
300 return; 304 return;
301 } 305 }
302 306
303 trace.func = self_addr; 307 /*
308 * Get the recorded ip of the current mcount calling site in the
309 * __mcount_loc section, which will be used to filter the function
310 * entries configured through the tracing/set_graph_function interface.
311 */
312
313 insns = in_kernel_space(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
314 trace.func = self_ra - (MCOUNT_INSN_SIZE * insns);
304 315
305 /* Only trace if the calling function expects to */ 316 /* Only trace if the calling function expects to */
306 if (!ftrace_graph_entry(&trace)) { 317 if (!ftrace_graph_entry(&trace)) {
307 current->curr_ret_stack--; 318 current->curr_ret_stack--;
308 *parent = old; 319 *parent_ra_addr = old_parent_ra;
309 } 320 }
310 return; 321 return;
311out: 322out:
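
prepare_ftrace_return() above now reconstructs the recorded call site by stepping back from the return address: two instructions for kernel text, MCOUNT_OFFSET_INSNS + 1 for modules. A hedged arithmetic sketch of that computation; the addresses are invented and MCOUNT_INSN_SIZE is taken as 4 bytes:

#include <stdio.h>

#define MCOUNT_INSN_SIZE        4
#define MCOUNT_OFFSET_INSNS     4   /* 5 with KBUILD_MCOUNT_RA_ADDRESS && CONFIG_32BIT */

static unsigned long call_site(unsigned long self_ra, int in_kernel)
{
        int insns = in_kernel ? 2 : MCOUNT_OFFSET_INSNS + 1;

        /* self_ra points past the jal/jalr; back up to the recorded mcount site */
        return self_ra - MCOUNT_INSN_SIZE * insns;
}

int main(void)
{
        printf("kernel : %#lx\n", call_site(0x80123458ul, 1));  /* -> 0x80123450 */
        printf("module : %#lx\n", call_site(0xc0045678ul, 0));  /* -> 0xc0045664 */
        return 0;
}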
diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c
index 9fadd17888d..391221b6a6a 100644
--- a/arch/mips/kernel/i8253.c
+++ b/arch/mips/kernel/i8253.c
@@ -125,84 +125,11 @@ void __init setup_pit_timer(void)
125 setup_irq(0, &irq0); 125 setup_irq(0, &irq0);
126} 126}
127 127
128/*
129 * Since the PIT overflows every tick, its not very useful
130 * to just read by itself. So use jiffies to emulate a free
131 * running counter:
132 */
133static cycle_t pit_read(struct clocksource *cs)
134{
135 unsigned long flags;
136 int count;
137 u32 jifs;
138 static int old_count;
139 static u32 old_jifs;
140
141 raw_spin_lock_irqsave(&i8253_lock, flags);
142 /*
143 * Although our caller may have the read side of xtime_lock,
144 * this is now a seqlock, and we are cheating in this routine
145 * by having side effects on state that we cannot undo if
146 * there is a collision on the seqlock and our caller has to
147 * retry. (Namely, old_jifs and old_count.) So we must treat
148 * jiffies as volatile despite the lock. We read jiffies
149 * before latching the timer count to guarantee that although
150 * the jiffies value might be older than the count (that is,
151 * the counter may underflow between the last point where
152 * jiffies was incremented and the point where we latch the
153 * count), it cannot be newer.
154 */
155 jifs = jiffies;
156 outb_p(0x00, PIT_MODE); /* latch the count ASAP */
157 count = inb_p(PIT_CH0); /* read the latched count */
158 count |= inb_p(PIT_CH0) << 8;
159
160 /* VIA686a test code... reset the latch if count > max + 1 */
161 if (count > LATCH) {
162 outb_p(0x34, PIT_MODE);
163 outb_p(LATCH & 0xff, PIT_CH0);
164 outb(LATCH >> 8, PIT_CH0);
165 count = LATCH - 1;
166 }
167
168 /*
169 * It's possible for count to appear to go the wrong way for a
170 * couple of reasons:
171 *
172 * 1. The timer counter underflows, but we haven't handled the
173 * resulting interrupt and incremented jiffies yet.
174 * 2. Hardware problem with the timer, not giving us continuous time,
175 * the counter does small "jumps" upwards on some Pentium systems,
176 * (see c't 95/10 page 335 for Neptun bug.)
177 *
178 * Previous attempts to handle these cases intelligently were
179 * buggy, so we just do the simple thing now.
180 */
181 if (count > old_count && jifs == old_jifs) {
182 count = old_count;
183 }
184 old_count = count;
185 old_jifs = jifs;
186
187 raw_spin_unlock_irqrestore(&i8253_lock, flags);
188
189 count = (LATCH - 1) - count;
190
191 return (cycle_t)(jifs * LATCH) + count;
192}
193
194static struct clocksource clocksource_pit = {
195 .name = "pit",
196 .rating = 110,
197 .read = pit_read,
198 .mask = CLOCKSOURCE_MASK(32),
199};
200
201static int __init init_pit_clocksource(void) 128static int __init init_pit_clocksource(void)
202{ 129{
203 if (num_possible_cpus() > 1) /* PIT does not scale! */ 130 if (num_possible_cpus() > 1) /* PIT does not scale! */
204 return 0; 131 return 0;
205 132
206 return clocksource_register_hz(&clocksource_pit, CLOCK_TICK_RATE); 133 return clocksource_i8253_init();
207} 134}
208arch_initcall(init_pit_clocksource); 135arch_initcall(init_pit_clocksource);
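
The hunk above drops the local PIT clocksource and defers to the shared clocksource_i8253_init() from the consolidated i8253 code. For reference, a hedged sketch of registering a simple free-running counter through the same clocksource_register_hz() helper the removed code used; the name, rating, input clock and read callback are placeholders:

#include <linux/clocksource.h>
#include <linux/init.h>

#define COUNTER_HZ      1000000         /* made-up fixed input clock */

static cycle_t sketch_counter_read(struct clocksource *cs)
{
        /* a real driver would read a hardware count register here */
        return 0;
}

static struct clocksource sketch_clocksource = {
        .name   = "sketch-counter",
        .rating = 200,
        .read   = sketch_counter_read,
        .mask   = CLOCKSOURCE_MASK(32),
        .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __init sketch_clocksource_init(void)
{
        return clocksource_register_hz(&sketch_clocksource, COUNTER_HZ);
}
arch_initcall(sketch_clocksource_init);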
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
index c58176cc796..c018696765d 100644
--- a/arch/mips/kernel/i8259.c
+++ b/arch/mips/kernel/i8259.c
@@ -31,19 +31,19 @@
31 31
32static int i8259A_auto_eoi = -1; 32static int i8259A_auto_eoi = -1;
33DEFINE_RAW_SPINLOCK(i8259A_lock); 33DEFINE_RAW_SPINLOCK(i8259A_lock);
34static void disable_8259A_irq(unsigned int irq); 34static void disable_8259A_irq(struct irq_data *d);
35static void enable_8259A_irq(unsigned int irq); 35static void enable_8259A_irq(struct irq_data *d);
36static void mask_and_ack_8259A(unsigned int irq); 36static void mask_and_ack_8259A(struct irq_data *d);
37static void init_8259A(int auto_eoi); 37static void init_8259A(int auto_eoi);
38 38
39static struct irq_chip i8259A_chip = { 39static struct irq_chip i8259A_chip = {
40 .name = "XT-PIC", 40 .name = "XT-PIC",
41 .mask = disable_8259A_irq, 41 .irq_mask = disable_8259A_irq,
42 .disable = disable_8259A_irq, 42 .irq_disable = disable_8259A_irq,
43 .unmask = enable_8259A_irq, 43 .irq_unmask = enable_8259A_irq,
44 .mask_ack = mask_and_ack_8259A, 44 .irq_mask_ack = mask_and_ack_8259A,
45#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF 45#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
46 .set_affinity = plat_set_irq_affinity, 46 .irq_set_affinity = plat_set_irq_affinity,
47#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */ 47#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
48}; 48};
49 49
@@ -59,12 +59,11 @@ static unsigned int cached_irq_mask = 0xffff;
59#define cached_master_mask (cached_irq_mask) 59#define cached_master_mask (cached_irq_mask)
60#define cached_slave_mask (cached_irq_mask >> 8) 60#define cached_slave_mask (cached_irq_mask >> 8)
61 61
62static void disable_8259A_irq(unsigned int irq) 62static void disable_8259A_irq(struct irq_data *d)
63{ 63{
64 unsigned int mask; 64 unsigned int mask, irq = d->irq - I8259A_IRQ_BASE;
65 unsigned long flags; 65 unsigned long flags;
66 66
67 irq -= I8259A_IRQ_BASE;
68 mask = 1 << irq; 67 mask = 1 << irq;
69 raw_spin_lock_irqsave(&i8259A_lock, flags); 68 raw_spin_lock_irqsave(&i8259A_lock, flags);
70 cached_irq_mask |= mask; 69 cached_irq_mask |= mask;
@@ -75,12 +74,11 @@ static void disable_8259A_irq(unsigned int irq)
75 raw_spin_unlock_irqrestore(&i8259A_lock, flags); 74 raw_spin_unlock_irqrestore(&i8259A_lock, flags);
76} 75}
77 76
78static void enable_8259A_irq(unsigned int irq) 77static void enable_8259A_irq(struct irq_data *d)
79{ 78{
80 unsigned int mask; 79 unsigned int mask, irq = d->irq - I8259A_IRQ_BASE;
81 unsigned long flags; 80 unsigned long flags;
82 81
83 irq -= I8259A_IRQ_BASE;
84 mask = ~(1 << irq); 82 mask = ~(1 << irq);
85 raw_spin_lock_irqsave(&i8259A_lock, flags); 83 raw_spin_lock_irqsave(&i8259A_lock, flags);
86 cached_irq_mask &= mask; 84 cached_irq_mask &= mask;
@@ -112,7 +110,7 @@ int i8259A_irq_pending(unsigned int irq)
112void make_8259A_irq(unsigned int irq) 110void make_8259A_irq(unsigned int irq)
113{ 111{
114 disable_irq_nosync(irq); 112 disable_irq_nosync(irq);
115 set_irq_chip_and_handler(irq, &i8259A_chip, handle_level_irq); 113 irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
116 enable_irq(irq); 114 enable_irq(irq);
117} 115}
118 116
@@ -145,12 +143,11 @@ static inline int i8259A_irq_real(unsigned int irq)
145 * first, _then_ send the EOI, and the order of EOI 143 * first, _then_ send the EOI, and the order of EOI
146 * to the two 8259s is important! 144 * to the two 8259s is important!
147 */ 145 */
148static void mask_and_ack_8259A(unsigned int irq) 146static void mask_and_ack_8259A(struct irq_data *d)
149{ 147{
150 unsigned int irqmask; 148 unsigned int irqmask, irq = d->irq - I8259A_IRQ_BASE;
151 unsigned long flags; 149 unsigned long flags;
152 150
153 irq -= I8259A_IRQ_BASE;
154 irqmask = 1 << irq; 151 irqmask = 1 << irq;
155 raw_spin_lock_irqsave(&i8259A_lock, flags); 152 raw_spin_lock_irqsave(&i8259A_lock, flags);
156 /* 153 /*
@@ -290,9 +287,9 @@ static void init_8259A(int auto_eoi)
290 * In AEOI mode we just have to mask the interrupt 287 * In AEOI mode we just have to mask the interrupt
291 * when acking. 288 * when acking.
292 */ 289 */
293 i8259A_chip.mask_ack = disable_8259A_irq; 290 i8259A_chip.irq_mask_ack = disable_8259A_irq;
294 else 291 else
295 i8259A_chip.mask_ack = mask_and_ack_8259A; 292 i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
296 293
297 udelay(100); /* wait for 8259A to initialize */ 294 udelay(100); /* wait for 8259A to initialize */
298 295
@@ -339,8 +336,8 @@ void __init init_i8259_irqs(void)
339 init_8259A(0); 336 init_8259A(0);
340 337
341 for (i = I8259A_IRQ_BASE; i < I8259A_IRQ_BASE + 16; i++) { 338 for (i = I8259A_IRQ_BASE; i < I8259A_IRQ_BASE + 16; i++) {
342 set_irq_chip_and_handler(i, &i8259A_chip, handle_level_irq); 339 irq_set_chip_and_handler(i, &i8259A_chip, handle_level_irq);
343 set_irq_probe(i); 340 irq_set_probe(i);
344 } 341 }
345 342
346 setup_irq(I8259A_IRQ_BASE + PIC_CASCADE_IR, &irq2); 343 setup_irq(I8259A_IRQ_BASE + PIC_CASCADE_IR, &irq2);
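
The i8259 conversion above folds the I8259A_IRQ_BASE subtraction into the new irq_data callbacks while keeping the cached-mask-under-raw-spinlock scheme. A hedged sketch of that idiom with placeholder names (SKETCH_IRQ_BASE and sketch_write_mask() are invented):

#include <linux/irq.h>
#include <linux/spinlock.h>

#define SKETCH_IRQ_BASE 0

static DEFINE_RAW_SPINLOCK(sketch_lock);
static unsigned int sketch_cached_mask = 0xffff;

static void sketch_write_mask(unsigned int mask)
{
        /* hardware write of the cached mask word would go here */
}

static void sketch_mask_irq(struct irq_data *d)
{
        unsigned int irq = d->irq - SKETCH_IRQ_BASE;
        unsigned long flags;

        raw_spin_lock_irqsave(&sketch_lock, flags);
        sketch_cached_mask |= 1 << irq;
        sketch_write_mask(sketch_cached_mask);
        raw_spin_unlock_irqrestore(&sketch_lock, flags);
}

static void sketch_unmask_irq(struct irq_data *d)
{
        unsigned int irq = d->irq - SKETCH_IRQ_BASE;
        unsigned long flags;

        raw_spin_lock_irqsave(&sketch_lock, flags);
        sketch_cached_mask &= ~(1 << irq);
        sketch_write_mask(sketch_cached_mask);
        raw_spin_unlock_irqrestore(&sketch_lock, flags);
}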
diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c
index 1774271af84..0c527f65219 100644
--- a/arch/mips/kernel/irq-gic.c
+++ b/arch/mips/kernel/irq-gic.c
@@ -87,17 +87,10 @@ unsigned int gic_get_int(void)
87 return i; 87 return i;
88} 88}
89 89
90static unsigned int gic_irq_startup(unsigned int irq) 90static void gic_irq_ack(struct irq_data *d)
91{ 91{
92 irq -= _irqbase; 92 unsigned int irq = d->irq - _irqbase;
93 pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
94 GIC_SET_INTR_MASK(irq);
95 return 0;
96}
97 93
98static void gic_irq_ack(unsigned int irq)
99{
100 irq -= _irqbase;
101 pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq); 94 pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
102 GIC_CLR_INTR_MASK(irq); 95 GIC_CLR_INTR_MASK(irq);
103 96
@@ -105,16 +98,16 @@ static void gic_irq_ack(unsigned int irq)
105 GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), irq); 98 GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), irq);
106} 99}
107 100
108static void gic_mask_irq(unsigned int irq) 101static void gic_mask_irq(struct irq_data *d)
109{ 102{
110 irq -= _irqbase; 103 unsigned int irq = d->irq - _irqbase;
111 pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq); 104 pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
112 GIC_CLR_INTR_MASK(irq); 105 GIC_CLR_INTR_MASK(irq);
113} 106}
114 107
115static void gic_unmask_irq(unsigned int irq) 108static void gic_unmask_irq(struct irq_data *d)
116{ 109{
117 irq -= _irqbase; 110 unsigned int irq = d->irq - _irqbase;
118 pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq); 111 pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
119 GIC_SET_INTR_MASK(irq); 112 GIC_SET_INTR_MASK(irq);
120} 113}
@@ -123,13 +116,14 @@ static void gic_unmask_irq(unsigned int irq)
123 116
124static DEFINE_SPINLOCK(gic_lock); 117static DEFINE_SPINLOCK(gic_lock);
125 118
126static int gic_set_affinity(unsigned int irq, const struct cpumask *cpumask) 119static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
120 bool force)
127{ 121{
122 unsigned int irq = d->irq - _irqbase;
128 cpumask_t tmp = CPU_MASK_NONE; 123 cpumask_t tmp = CPU_MASK_NONE;
129 unsigned long flags; 124 unsigned long flags;
130 int i; 125 int i;
131 126
132 irq -= _irqbase;
133 pr_debug("%s(%d) called\n", __func__, irq); 127 pr_debug("%s(%d) called\n", __func__, irq);
134 cpumask_and(&tmp, cpumask, cpu_online_mask); 128 cpumask_and(&tmp, cpumask, cpu_online_mask);
135 if (cpus_empty(tmp)) 129 if (cpus_empty(tmp))
@@ -147,23 +141,22 @@ static int gic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
147 set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask); 141 set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);
148 142
149 } 143 }
150 cpumask_copy(irq_desc[irq].affinity, cpumask); 144 cpumask_copy(d->affinity, cpumask);
151 spin_unlock_irqrestore(&gic_lock, flags); 145 spin_unlock_irqrestore(&gic_lock, flags);
152 146
153 return 0; 147 return IRQ_SET_MASK_OK_NOCOPY;
154} 148}
155#endif 149#endif
156 150
157static struct irq_chip gic_irq_controller = { 151static struct irq_chip gic_irq_controller = {
158 .name = "MIPS GIC", 152 .name = "MIPS GIC",
159 .startup = gic_irq_startup, 153 .irq_ack = gic_irq_ack,
160 .ack = gic_irq_ack, 154 .irq_mask = gic_mask_irq,
161 .mask = gic_mask_irq, 155 .irq_mask_ack = gic_mask_irq,
162 .mask_ack = gic_mask_irq, 156 .irq_unmask = gic_unmask_irq,
163 .unmask = gic_unmask_irq, 157 .irq_eoi = gic_unmask_irq,
164 .eoi = gic_unmask_irq,
165#ifdef CONFIG_SMP 158#ifdef CONFIG_SMP
166 .set_affinity = gic_set_affinity, 159 .irq_set_affinity = gic_set_affinity,
167#endif 160#endif
168}; 161};
169 162
@@ -236,7 +229,7 @@ static void __init gic_basic_init(int numintrs, int numvpes,
236 vpe_local_setup(numvpes); 229 vpe_local_setup(numvpes);
237 230
238 for (i = _irqbase; i < (_irqbase + numintrs); i++) 231 for (i = _irqbase; i < (_irqbase + numintrs); i++)
239 set_irq_chip(i, &gic_irq_controller); 232 irq_set_chip(i, &gic_irq_controller);
240} 233}
241 234
242void __init gic_init(unsigned long gic_base_addr, 235void __init gic_init(unsigned long gic_base_addr,
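
gic_set_affinity() above now takes an irq_data pointer, updates d->affinity itself and returns IRQ_SET_MASK_OK_NOCOPY so the core skips its own copy. A hedged sketch of an .irq_set_affinity callback following the same contract; sketch_route_to_cpu() is an invented helper, not a GIC call:

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/irq.h>

static void sketch_route_to_cpu(unsigned int irq, unsigned int cpu)
{
        /* hardware-specific routing write would go here */
}

static int sketch_set_affinity(struct irq_data *d, const struct cpumask *mask,
                               bool force)
{
        unsigned int cpu = cpumask_any_and(mask, cpu_online_mask);

        if (cpu >= nr_cpu_ids)
                return -EINVAL;

        sketch_route_to_cpu(d->irq, cpu);
        cpumask_copy(d->affinity, mask);

        /* tell the core the affinity field is already up to date */
        return IRQ_SET_MASK_OK_NOCOPY;
}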
diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c
index 42ef81461bf..883fc6cead3 100644
--- a/arch/mips/kernel/irq-gt641xx.c
+++ b/arch/mips/kernel/irq-gt641xx.c
@@ -29,64 +29,64 @@
29 29
30static DEFINE_RAW_SPINLOCK(gt641xx_irq_lock); 30static DEFINE_RAW_SPINLOCK(gt641xx_irq_lock);
31 31
32static void ack_gt641xx_irq(unsigned int irq) 32static void ack_gt641xx_irq(struct irq_data *d)
33{ 33{
34 unsigned long flags; 34 unsigned long flags;
35 u32 cause; 35 u32 cause;
36 36
37 raw_spin_lock_irqsave(&gt641xx_irq_lock, flags); 37 raw_spin_lock_irqsave(&gt641xx_irq_lock, flags);
38 cause = GT_READ(GT_INTRCAUSE_OFS); 38 cause = GT_READ(GT_INTRCAUSE_OFS);
39 cause &= ~GT641XX_IRQ_TO_BIT(irq); 39 cause &= ~GT641XX_IRQ_TO_BIT(d->irq);
40 GT_WRITE(GT_INTRCAUSE_OFS, cause); 40 GT_WRITE(GT_INTRCAUSE_OFS, cause);
41 raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags); 41 raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
42} 42}
43 43
44static void mask_gt641xx_irq(unsigned int irq) 44static void mask_gt641xx_irq(struct irq_data *d)
45{ 45{
46 unsigned long flags; 46 unsigned long flags;
47 u32 mask; 47 u32 mask;
48 48
49 raw_spin_lock_irqsave(&gt641xx_irq_lock, flags); 49 raw_spin_lock_irqsave(&gt641xx_irq_lock, flags);
50 mask = GT_READ(GT_INTRMASK_OFS); 50 mask = GT_READ(GT_INTRMASK_OFS);
51 mask &= ~GT641XX_IRQ_TO_BIT(irq); 51 mask &= ~GT641XX_IRQ_TO_BIT(d->irq);
52 GT_WRITE(GT_INTRMASK_OFS, mask); 52 GT_WRITE(GT_INTRMASK_OFS, mask);
53 raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags); 53 raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
54} 54}
55 55
56static void mask_ack_gt641xx_irq(unsigned int irq) 56static void mask_ack_gt641xx_irq(struct irq_data *d)
57{ 57{
58 unsigned long flags; 58 unsigned long flags;
59 u32 cause, mask; 59 u32 cause, mask;
60 60
61 raw_spin_lock_irqsave(&gt641xx_irq_lock, flags); 61 raw_spin_lock_irqsave(&gt641xx_irq_lock, flags);
62 mask = GT_READ(GT_INTRMASK_OFS); 62 mask = GT_READ(GT_INTRMASK_OFS);
63 mask &= ~GT641XX_IRQ_TO_BIT(irq); 63 mask &= ~GT641XX_IRQ_TO_BIT(d->irq);
64 GT_WRITE(GT_INTRMASK_OFS, mask); 64 GT_WRITE(GT_INTRMASK_OFS, mask);
65 65
66 cause = GT_READ(GT_INTRCAUSE_OFS); 66 cause = GT_READ(GT_INTRCAUSE_OFS);
67 cause &= ~GT641XX_IRQ_TO_BIT(irq); 67 cause &= ~GT641XX_IRQ_TO_BIT(d->irq);
68 GT_WRITE(GT_INTRCAUSE_OFS, cause); 68 GT_WRITE(GT_INTRCAUSE_OFS, cause);
69 raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags); 69 raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
70} 70}
71 71
72static void unmask_gt641xx_irq(unsigned int irq) 72static void unmask_gt641xx_irq(struct irq_data *d)
73{ 73{
74 unsigned long flags; 74 unsigned long flags;
75 u32 mask; 75 u32 mask;
76 76
77 raw_spin_lock_irqsave(&gt641xx_irq_lock, flags); 77 raw_spin_lock_irqsave(&gt641xx_irq_lock, flags);
78 mask = GT_READ(GT_INTRMASK_OFS); 78 mask = GT_READ(GT_INTRMASK_OFS);
79 mask |= GT641XX_IRQ_TO_BIT(irq); 79 mask |= GT641XX_IRQ_TO_BIT(d->irq);
80 GT_WRITE(GT_INTRMASK_OFS, mask); 80 GT_WRITE(GT_INTRMASK_OFS, mask);
81 raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags); 81 raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
82} 82}
83 83
84static struct irq_chip gt641xx_irq_chip = { 84static struct irq_chip gt641xx_irq_chip = {
85 .name = "GT641xx", 85 .name = "GT641xx",
86 .ack = ack_gt641xx_irq, 86 .irq_ack = ack_gt641xx_irq,
87 .mask = mask_gt641xx_irq, 87 .irq_mask = mask_gt641xx_irq,
88 .mask_ack = mask_ack_gt641xx_irq, 88 .irq_mask_ack = mask_ack_gt641xx_irq,
89 .unmask = unmask_gt641xx_irq, 89 .irq_unmask = unmask_gt641xx_irq,
90}; 90};
91 91
92void gt641xx_irq_dispatch(void) 92void gt641xx_irq_dispatch(void)
@@ -126,6 +126,6 @@ void __init gt641xx_irq_init(void)
126 * bit31: logical or of bits[25:1]. 126 * bit31: logical or of bits[25:1].
127 */ 127 */
128 for (i = 1; i < 30; i++) 128 for (i = 1; i < 30; i++)
129 set_irq_chip_and_handler(GT641XX_IRQ_BASE + i, 129 irq_set_chip_and_handler(GT641XX_IRQ_BASE + i,
130 &gt641xx_irq_chip, handle_level_irq); 130 &gt641xx_irq_chip, handle_level_irq);
131} 131}
diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c
index 6a8cd28133d..0c6afeed89d 100644
--- a/arch/mips/kernel/irq-msc01.c
+++ b/arch/mips/kernel/irq-msc01.c
@@ -28,8 +28,10 @@ static unsigned long _icctrl_msc;
28static unsigned int irq_base; 28static unsigned int irq_base;
29 29
30/* mask off an interrupt */ 30/* mask off an interrupt */
31static inline void mask_msc_irq(unsigned int irq) 31static inline void mask_msc_irq(struct irq_data *d)
32{ 32{
33 unsigned int irq = d->irq;
34
33 if (irq < (irq_base + 32)) 35 if (irq < (irq_base + 32))
34 MSCIC_WRITE(MSC01_IC_DISL, 1<<(irq - irq_base)); 36 MSCIC_WRITE(MSC01_IC_DISL, 1<<(irq - irq_base));
35 else 37 else
@@ -37,8 +39,10 @@ static inline void mask_msc_irq(unsigned int irq)
37} 39}
38 40
39/* unmask an interrupt */ 41/* unmask an interrupt */
40static inline void unmask_msc_irq(unsigned int irq) 42static inline void unmask_msc_irq(struct irq_data *d)
41{ 43{
44 unsigned int irq = d->irq;
45
42 if (irq < (irq_base + 32)) 46 if (irq < (irq_base + 32))
43 MSCIC_WRITE(MSC01_IC_ENAL, 1<<(irq - irq_base)); 47 MSCIC_WRITE(MSC01_IC_ENAL, 1<<(irq - irq_base));
44 else 48 else
@@ -48,9 +52,11 @@ static inline void unmask_msc_irq(unsigned int irq)
48/* 52/*
49 * Masks and ACKs an IRQ 53 * Masks and ACKs an IRQ
50 */ 54 */
51static void level_mask_and_ack_msc_irq(unsigned int irq) 55static void level_mask_and_ack_msc_irq(struct irq_data *d)
52{ 56{
53 mask_msc_irq(irq); 57 unsigned int irq = d->irq;
58
59 mask_msc_irq(d);
54 if (!cpu_has_veic) 60 if (!cpu_has_veic)
55 MSCIC_WRITE(MSC01_IC_EOI, 0); 61 MSCIC_WRITE(MSC01_IC_EOI, 0);
56 /* This actually needs to be a call into platform code */ 62 /* This actually needs to be a call into platform code */
@@ -60,9 +66,11 @@ static void level_mask_and_ack_msc_irq(unsigned int irq)
60/* 66/*
61 * Masks and ACKs an IRQ 67 * Masks and ACKs an IRQ
62 */ 68 */
63static void edge_mask_and_ack_msc_irq(unsigned int irq) 69static void edge_mask_and_ack_msc_irq(struct irq_data *d)
64{ 70{
65 mask_msc_irq(irq); 71 unsigned int irq = d->irq;
72
73 mask_msc_irq(d);
66 if (!cpu_has_veic) 74 if (!cpu_has_veic)
67 MSCIC_WRITE(MSC01_IC_EOI, 0); 75 MSCIC_WRITE(MSC01_IC_EOI, 0);
68 else { 76 else {
@@ -75,15 +83,6 @@ static void edge_mask_and_ack_msc_irq(unsigned int irq)
75} 83}
76 84
77/* 85/*
78 * End IRQ processing
79 */
80static void end_msc_irq(unsigned int irq)
81{
82 if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
83 unmask_msc_irq(irq);
84}
85
86/*
87 * Interrupt handler for interrupts coming from SOC-it. 86 * Interrupt handler for interrupts coming from SOC-it.
88 */ 87 */
89void ll_msc_irq(void) 88void ll_msc_irq(void)
@@ -107,22 +106,20 @@ static void msc_bind_eic_interrupt(int irq, int set)
107 106
108static struct irq_chip msc_levelirq_type = { 107static struct irq_chip msc_levelirq_type = {
109 .name = "SOC-it-Level", 108 .name = "SOC-it-Level",
110 .ack = level_mask_and_ack_msc_irq, 109 .irq_ack = level_mask_and_ack_msc_irq,
111 .mask = mask_msc_irq, 110 .irq_mask = mask_msc_irq,
112 .mask_ack = level_mask_and_ack_msc_irq, 111 .irq_mask_ack = level_mask_and_ack_msc_irq,
113 .unmask = unmask_msc_irq, 112 .irq_unmask = unmask_msc_irq,
114 .eoi = unmask_msc_irq, 113 .irq_eoi = unmask_msc_irq,
115 .end = end_msc_irq,
116}; 114};
117 115
118static struct irq_chip msc_edgeirq_type = { 116static struct irq_chip msc_edgeirq_type = {
119 .name = "SOC-it-Edge", 117 .name = "SOC-it-Edge",
120 .ack = edge_mask_and_ack_msc_irq, 118 .irq_ack = edge_mask_and_ack_msc_irq,
121 .mask = mask_msc_irq, 119 .irq_mask = mask_msc_irq,
122 .mask_ack = edge_mask_and_ack_msc_irq, 120 .irq_mask_ack = edge_mask_and_ack_msc_irq,
123 .unmask = unmask_msc_irq, 121 .irq_unmask = unmask_msc_irq,
124 .eoi = unmask_msc_irq, 122 .irq_eoi = unmask_msc_irq,
125 .end = end_msc_irq,
126}; 123};
127 124
128 125
@@ -140,16 +137,20 @@ void __init init_msc_irqs(unsigned long icubase, unsigned int irqbase, msc_irqma
140 137
141 switch (imp->im_type) { 138 switch (imp->im_type) {
142 case MSC01_IRQ_EDGE: 139 case MSC01_IRQ_EDGE:
143 set_irq_chip_and_handler_name(irqbase + n, 140 irq_set_chip_and_handler_name(irqbase + n,
144 &msc_edgeirq_type, handle_edge_irq, "edge"); 141 &msc_edgeirq_type,
142 handle_edge_irq,
143 "edge");
145 if (cpu_has_veic) 144 if (cpu_has_veic)
146 MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT); 145 MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT);
147 else 146 else
148 MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT | imp->im_lvl); 147 MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT | imp->im_lvl);
149 break; 148 break;
150 case MSC01_IRQ_LEVEL: 149 case MSC01_IRQ_LEVEL:
151 set_irq_chip_and_handler_name(irqbase+n, 150 irq_set_chip_and_handler_name(irqbase + n,
152 &msc_levelirq_type, handle_level_irq, "level"); 151 &msc_levelirq_type,
152 handle_level_irq,
153 "level");
153 if (cpu_has_veic) 154 if (cpu_has_veic)
154 MSCIC_WRITE(MSC01_IC_SUP+n*8, 0); 155 MSCIC_WRITE(MSC01_IC_SUP+n*8, 0);
155 else 156 else
diff --git a/arch/mips/kernel/irq-rm7000.c b/arch/mips/kernel/irq-rm7000.c
index 9731e8b4786..a8a8977d588 100644
--- a/arch/mips/kernel/irq-rm7000.c
+++ b/arch/mips/kernel/irq-rm7000.c
@@ -18,23 +18,23 @@
18#include <asm/mipsregs.h> 18#include <asm/mipsregs.h>
19#include <asm/system.h> 19#include <asm/system.h>
20 20
21static inline void unmask_rm7k_irq(unsigned int irq) 21static inline void unmask_rm7k_irq(struct irq_data *d)
22{ 22{
23 set_c0_intcontrol(0x100 << (irq - RM7K_CPU_IRQ_BASE)); 23 set_c0_intcontrol(0x100 << (d->irq - RM7K_CPU_IRQ_BASE));
24} 24}
25 25
26static inline void mask_rm7k_irq(unsigned int irq) 26static inline void mask_rm7k_irq(struct irq_data *d)
27{ 27{
28 clear_c0_intcontrol(0x100 << (irq - RM7K_CPU_IRQ_BASE)); 28 clear_c0_intcontrol(0x100 << (d->irq - RM7K_CPU_IRQ_BASE));
29} 29}
30 30
31static struct irq_chip rm7k_irq_controller = { 31static struct irq_chip rm7k_irq_controller = {
32 .name = "RM7000", 32 .name = "RM7000",
33 .ack = mask_rm7k_irq, 33 .irq_ack = mask_rm7k_irq,
34 .mask = mask_rm7k_irq, 34 .irq_mask = mask_rm7k_irq,
35 .mask_ack = mask_rm7k_irq, 35 .irq_mask_ack = mask_rm7k_irq,
36 .unmask = unmask_rm7k_irq, 36 .irq_unmask = unmask_rm7k_irq,
37 .eoi = unmask_rm7k_irq 37 .irq_eoi = unmask_rm7k_irq
38}; 38};
39 39
40void __init rm7k_cpu_irq_init(void) 40void __init rm7k_cpu_irq_init(void)
@@ -45,6 +45,6 @@ void __init rm7k_cpu_irq_init(void)
45 clear_c0_intcontrol(0x00000f00); /* Mask all */ 45 clear_c0_intcontrol(0x00000f00); /* Mask all */
46 46
47 for (i = base; i < base + 4; i++) 47 for (i = base; i < base + 4; i++)
48 set_irq_chip_and_handler(i, &rm7k_irq_controller, 48 irq_set_chip_and_handler(i, &rm7k_irq_controller,
49 handle_percpu_irq); 49 handle_percpu_irq);
50} 50}
diff --git a/arch/mips/kernel/irq-rm9000.c b/arch/mips/kernel/irq-rm9000.c
index b7e4025b58a..38874a4b925 100644
--- a/arch/mips/kernel/irq-rm9000.c
+++ b/arch/mips/kernel/irq-rm9000.c
@@ -19,22 +19,22 @@
19#include <asm/mipsregs.h> 19#include <asm/mipsregs.h>
20#include <asm/system.h> 20#include <asm/system.h>
21 21
22static inline void unmask_rm9k_irq(unsigned int irq) 22static inline void unmask_rm9k_irq(struct irq_data *d)
23{ 23{
24 set_c0_intcontrol(0x1000 << (irq - RM9K_CPU_IRQ_BASE)); 24 set_c0_intcontrol(0x1000 << (d->irq - RM9K_CPU_IRQ_BASE));
25} 25}
26 26
27static inline void mask_rm9k_irq(unsigned int irq) 27static inline void mask_rm9k_irq(struct irq_data *d)
28{ 28{
29 clear_c0_intcontrol(0x1000 << (irq - RM9K_CPU_IRQ_BASE)); 29 clear_c0_intcontrol(0x1000 << (d->irq - RM9K_CPU_IRQ_BASE));
30} 30}
31 31
32static inline void rm9k_cpu_irq_enable(unsigned int irq) 32static inline void rm9k_cpu_irq_enable(struct irq_data *d)
33{ 33{
34 unsigned long flags; 34 unsigned long flags;
35 35
36 local_irq_save(flags); 36 local_irq_save(flags);
37 unmask_rm9k_irq(irq); 37 unmask_rm9k_irq(d);
38 local_irq_restore(flags); 38 local_irq_restore(flags);
39} 39}
40 40
@@ -43,50 +43,47 @@ static inline void rm9k_cpu_irq_enable(unsigned int irq)
43 */ 43 */
44static void local_rm9k_perfcounter_irq_startup(void *args) 44static void local_rm9k_perfcounter_irq_startup(void *args)
45{ 45{
46 unsigned int irq = (unsigned int) args; 46 rm9k_cpu_irq_enable(args);
47
48 rm9k_cpu_irq_enable(irq);
49} 47}
50 48
51static unsigned int rm9k_perfcounter_irq_startup(unsigned int irq) 49static unsigned int rm9k_perfcounter_irq_startup(struct irq_data *d)
52{ 50{
53 on_each_cpu(local_rm9k_perfcounter_irq_startup, (void *) irq, 1); 51 on_each_cpu(local_rm9k_perfcounter_irq_startup, d, 1);
54 52
55 return 0; 53 return 0;
56} 54}
57 55
58static void local_rm9k_perfcounter_irq_shutdown(void *args) 56static void local_rm9k_perfcounter_irq_shutdown(void *args)
59{ 57{
60 unsigned int irq = (unsigned int) args;
61 unsigned long flags; 58 unsigned long flags;
62 59
63 local_irq_save(flags); 60 local_irq_save(flags);
64 mask_rm9k_irq(irq); 61 mask_rm9k_irq(args);
65 local_irq_restore(flags); 62 local_irq_restore(flags);
66} 63}
67 64
68static void rm9k_perfcounter_irq_shutdown(unsigned int irq) 65static void rm9k_perfcounter_irq_shutdown(struct irq_data *d)
69{ 66{
70 on_each_cpu(local_rm9k_perfcounter_irq_shutdown, (void *) irq, 1); 67 on_each_cpu(local_rm9k_perfcounter_irq_shutdown, d, 1);
71} 68}
72 69
73static struct irq_chip rm9k_irq_controller = { 70static struct irq_chip rm9k_irq_controller = {
74 .name = "RM9000", 71 .name = "RM9000",
75 .ack = mask_rm9k_irq, 72 .irq_ack = mask_rm9k_irq,
76 .mask = mask_rm9k_irq, 73 .irq_mask = mask_rm9k_irq,
77 .mask_ack = mask_rm9k_irq, 74 .irq_mask_ack = mask_rm9k_irq,
78 .unmask = unmask_rm9k_irq, 75 .irq_unmask = unmask_rm9k_irq,
79 .eoi = unmask_rm9k_irq 76 .irq_eoi = unmask_rm9k_irq
80}; 77};
81 78
82static struct irq_chip rm9k_perfcounter_irq = { 79static struct irq_chip rm9k_perfcounter_irq = {
83 .name = "RM9000", 80 .name = "RM9000",
84 .startup = rm9k_perfcounter_irq_startup, 81 .irq_startup = rm9k_perfcounter_irq_startup,
85 .shutdown = rm9k_perfcounter_irq_shutdown, 82 .irq_shutdown = rm9k_perfcounter_irq_shutdown,
86 .ack = mask_rm9k_irq, 83 .irq_ack = mask_rm9k_irq,
87 .mask = mask_rm9k_irq, 84 .irq_mask = mask_rm9k_irq,
88 .mask_ack = mask_rm9k_irq, 85 .irq_mask_ack = mask_rm9k_irq,
89 .unmask = unmask_rm9k_irq, 86 .irq_unmask = unmask_rm9k_irq,
90}; 87};
91 88
92unsigned int rm9000_perfcount_irq; 89unsigned int rm9000_perfcount_irq;
@@ -101,10 +98,10 @@ void __init rm9k_cpu_irq_init(void)
101 clear_c0_intcontrol(0x0000f000); /* Mask all */ 98 clear_c0_intcontrol(0x0000f000); /* Mask all */
102 99
103 for (i = base; i < base + 4; i++) 100 for (i = base; i < base + 4; i++)
104 set_irq_chip_and_handler(i, &rm9k_irq_controller, 101 irq_set_chip_and_handler(i, &rm9k_irq_controller,
105 handle_level_irq); 102 handle_level_irq);
106 103
107 rm9000_perfcount_irq = base + 1; 104 rm9000_perfcount_irq = base + 1;
108 set_irq_chip_and_handler(rm9000_perfcount_irq, &rm9k_perfcounter_irq, 105 irq_set_chip_and_handler(rm9000_perfcount_irq, &rm9k_perfcounter_irq,
109 handle_percpu_irq); 106 handle_percpu_irq);
110} 107}
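
The RM9000 perf-counter hunks above pass the struct irq_data pointer straight through on_each_cpu() so every CPU can mask or unmask its own copy of the line. A hedged sketch of that per-CPU startup pattern with placeholder names:

#include <linux/irq.h>
#include <linux/smp.h>

static void sketch_unmask(struct irq_data *d)
{
        /* a real driver would set the hardware unmask bit for d->irq here */
}

static void sketch_local_startup(void *arg)
{
        unsigned long flags;

        local_irq_save(flags);
        sketch_unmask(arg);             /* arg is the struct irq_data pointer */
        local_irq_restore(flags);
}

static unsigned int sketch_percpu_startup(struct irq_data *d)
{
        /* run the enable on every online CPU and wait for completion */
        on_each_cpu(sketch_local_startup, d, 1);
        return 0;
}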
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index 4f93db58a79..9b734d74ae8 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -81,48 +81,9 @@ void ack_bad_irq(unsigned int irq)
81 81
82atomic_t irq_err_count; 82atomic_t irq_err_count;
83 83
84/* 84int arch_show_interrupts(struct seq_file *p, int prec)
85 * Generic, controller-independent functions:
86 */
87
88int show_interrupts(struct seq_file *p, void *v)
89{ 85{
90 int i = *(loff_t *) v, j; 86 seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
91 struct irqaction * action;
92 unsigned long flags;
93
94 if (i == 0) {
95 seq_printf(p, " ");
96 for_each_online_cpu(j)
97 seq_printf(p, "CPU%d ", j);
98 seq_putc(p, '\n');
99 }
100
101 if (i < NR_IRQS) {
102 raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
103 action = irq_desc[i].action;
104 if (!action)
105 goto skip;
106 seq_printf(p, "%3d: ", i);
107#ifndef CONFIG_SMP
108 seq_printf(p, "%10u ", kstat_irqs(i));
109#else
110 for_each_online_cpu(j)
111 seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
112#endif
113 seq_printf(p, " %14s", irq_desc[i].chip->name);
114 seq_printf(p, " %s", action->name);
115
116 for (action=action->next; action; action = action->next)
117 seq_printf(p, ", %s", action->name);
118
119 seq_putc(p, '\n');
120skip:
121 raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
122 } else if (i == NR_IRQS) {
123 seq_putc(p, '\n');
124 seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
125 }
126 return 0; 87 return 0;
127} 88}
128 89
@@ -141,7 +102,7 @@ void __init init_IRQ(void)
141#endif 102#endif
142 103
143 for (i = 0; i < NR_IRQS; i++) 104 for (i = 0; i < NR_IRQS; i++)
144 set_irq_noprobe(i); 105 irq_set_noprobe(i);
145 106
146 arch_init_irq(); 107 arch_init_irq();
147 108
@@ -183,8 +144,8 @@ void __irq_entry do_IRQ(unsigned int irq)
183{ 144{
184 irq_enter(); 145 irq_enter();
185 check_stack_overflow(); 146 check_stack_overflow();
186 __DO_IRQ_SMTC_HOOK(irq); 147 if (!smtc_handle_on_other_cpu(irq))
187 generic_handle_irq(irq); 148 generic_handle_irq(irq);
188 irq_exit(); 149 irq_exit();
189} 150}
190 151
@@ -197,7 +158,7 @@ void __irq_entry do_IRQ(unsigned int irq)
197void __irq_entry do_IRQ_no_affinity(unsigned int irq) 158void __irq_entry do_IRQ_no_affinity(unsigned int irq)
198{ 159{
199 irq_enter(); 160 irq_enter();
200 __NO_AFFINITY_IRQ_SMTC_HOOK(irq); 161 smtc_im_backstop(irq);
201 generic_handle_irq(irq); 162 generic_handle_irq(irq);
202 irq_exit(); 163 irq_exit();
203} 164}
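
With the switch to arch_show_interrupts() above, the generic GENERIC_IRQ_SHOW code prints the per-IRQ table and the architecture only appends extra rows. A hedged sketch of such a hook; sketch_timer_ints is an invented counter and "LOC" an arbitrary label, while "prec" keeps the label aligned with the core's IRQ-number column:

#include <linux/interrupt.h>
#include <linux/seq_file.h>

static unsigned int sketch_timer_ints;

int arch_show_interrupts(struct seq_file *p, int prec)
{
        seq_printf(p, "%*s: %10u  Local timer interrupts (sketch)\n",
                   prec, "LOC", sketch_timer_ints);
        return 0;
}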
diff --git a/arch/mips/kernel/irq_cpu.c b/arch/mips/kernel/irq_cpu.c
index 0262abe0912..6e71b284f6c 100644
--- a/arch/mips/kernel/irq_cpu.c
+++ b/arch/mips/kernel/irq_cpu.c
@@ -37,42 +37,38 @@
37#include <asm/mipsmtregs.h> 37#include <asm/mipsmtregs.h>
38#include <asm/system.h> 38#include <asm/system.h>
39 39
40static inline void unmask_mips_irq(unsigned int irq) 40static inline void unmask_mips_irq(struct irq_data *d)
41{ 41{
42 set_c0_status(0x100 << (irq - MIPS_CPU_IRQ_BASE)); 42 set_c0_status(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
43 irq_enable_hazard(); 43 irq_enable_hazard();
44} 44}
45 45
46static inline void mask_mips_irq(unsigned int irq) 46static inline void mask_mips_irq(struct irq_data *d)
47{ 47{
48 clear_c0_status(0x100 << (irq - MIPS_CPU_IRQ_BASE)); 48 clear_c0_status(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
49 irq_disable_hazard(); 49 irq_disable_hazard();
50} 50}
51 51
52static struct irq_chip mips_cpu_irq_controller = { 52static struct irq_chip mips_cpu_irq_controller = {
53 .name = "MIPS", 53 .name = "MIPS",
54 .ack = mask_mips_irq, 54 .irq_ack = mask_mips_irq,
55 .mask = mask_mips_irq, 55 .irq_mask = mask_mips_irq,
56 .mask_ack = mask_mips_irq, 56 .irq_mask_ack = mask_mips_irq,
57 .unmask = unmask_mips_irq, 57 .irq_unmask = unmask_mips_irq,
58 .eoi = unmask_mips_irq, 58 .irq_eoi = unmask_mips_irq,
59}; 59};
60 60
61/* 61/*
62 * Basically the same as above but taking care of all the MT stuff 62 * Basically the same as above but taking care of all the MT stuff
63 */ 63 */
64 64
65#define unmask_mips_mt_irq unmask_mips_irq 65static unsigned int mips_mt_cpu_irq_startup(struct irq_data *d)
66#define mask_mips_mt_irq mask_mips_irq
67
68static unsigned int mips_mt_cpu_irq_startup(unsigned int irq)
69{ 66{
70 unsigned int vpflags = dvpe(); 67 unsigned int vpflags = dvpe();
71 68
72 clear_c0_cause(0x100 << (irq - MIPS_CPU_IRQ_BASE)); 69 clear_c0_cause(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
73 evpe(vpflags); 70 evpe(vpflags);
74 unmask_mips_mt_irq(irq); 71 unmask_mips_irq(d);
75
76 return 0; 72 return 0;
77} 73}
78 74
@@ -80,22 +76,22 @@ static unsigned int mips_mt_cpu_irq_startup(unsigned int irq)
80 * While we ack the interrupt interrupts are disabled and thus we don't need 76 * While we ack the interrupt interrupts are disabled and thus we don't need
81 * to deal with concurrency issues. Same for mips_cpu_irq_end. 77 * to deal with concurrency issues. Same for mips_cpu_irq_end.
82 */ 78 */
83static void mips_mt_cpu_irq_ack(unsigned int irq) 79static void mips_mt_cpu_irq_ack(struct irq_data *d)
84{ 80{
85 unsigned int vpflags = dvpe(); 81 unsigned int vpflags = dvpe();
86 clear_c0_cause(0x100 << (irq - MIPS_CPU_IRQ_BASE)); 82 clear_c0_cause(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
87 evpe(vpflags); 83 evpe(vpflags);
88 mask_mips_mt_irq(irq); 84 mask_mips_irq(d);
89} 85}
90 86
91static struct irq_chip mips_mt_cpu_irq_controller = { 87static struct irq_chip mips_mt_cpu_irq_controller = {
92 .name = "MIPS", 88 .name = "MIPS",
93 .startup = mips_mt_cpu_irq_startup, 89 .irq_startup = mips_mt_cpu_irq_startup,
94 .ack = mips_mt_cpu_irq_ack, 90 .irq_ack = mips_mt_cpu_irq_ack,
95 .mask = mask_mips_mt_irq, 91 .irq_mask = mask_mips_irq,
96 .mask_ack = mips_mt_cpu_irq_ack, 92 .irq_mask_ack = mips_mt_cpu_irq_ack,
97 .unmask = unmask_mips_mt_irq, 93 .irq_unmask = unmask_mips_irq,
98 .eoi = unmask_mips_mt_irq, 94 .irq_eoi = unmask_mips_irq,
99}; 95};
100 96
101void __init mips_cpu_irq_init(void) 97void __init mips_cpu_irq_init(void)
@@ -113,10 +109,10 @@ void __init mips_cpu_irq_init(void)
113 */ 109 */
114 if (cpu_has_mipsmt) 110 if (cpu_has_mipsmt)
115 for (i = irq_base; i < irq_base + 2; i++) 111 for (i = irq_base; i < irq_base + 2; i++)
116 set_irq_chip_and_handler(i, &mips_mt_cpu_irq_controller, 112 irq_set_chip_and_handler(i, &mips_mt_cpu_irq_controller,
117 handle_percpu_irq); 113 handle_percpu_irq);
118 114
119 for (i = irq_base + 2; i < irq_base + 8; i++) 115 for (i = irq_base + 2; i < irq_base + 8; i++)
120 set_irq_chip_and_handler(i, &mips_cpu_irq_controller, 116 irq_set_chip_and_handler(i, &mips_cpu_irq_controller,
121 handle_percpu_irq); 117 handle_percpu_irq);
122} 118}
diff --git a/arch/mips/kernel/irq_txx9.c b/arch/mips/kernel/irq_txx9.c
index 95a96f69172..b0c55b50218 100644
--- a/arch/mips/kernel/irq_txx9.c
+++ b/arch/mips/kernel/irq_txx9.c
@@ -63,9 +63,9 @@ static struct {
63 unsigned char mode; 63 unsigned char mode;
64} txx9irq[TXx9_MAX_IR] __read_mostly; 64} txx9irq[TXx9_MAX_IR] __read_mostly;
65 65
66static void txx9_irq_unmask(unsigned int irq) 66static void txx9_irq_unmask(struct irq_data *d)
67{ 67{
68 unsigned int irq_nr = irq - TXX9_IRQ_BASE; 68 unsigned int irq_nr = d->irq - TXX9_IRQ_BASE;
69 u32 __iomem *ilrp = &txx9_ircptr->ilr[(irq_nr % 16 ) / 2]; 69 u32 __iomem *ilrp = &txx9_ircptr->ilr[(irq_nr % 16 ) / 2];
70 int ofs = irq_nr / 16 * 16 + (irq_nr & 1) * 8; 70 int ofs = irq_nr / 16 * 16 + (irq_nr & 1) * 8;
71 71
@@ -79,9 +79,9 @@ static void txx9_irq_unmask(unsigned int irq)
79#endif 79#endif
80} 80}
81 81
82static inline void txx9_irq_mask(unsigned int irq) 82static inline void txx9_irq_mask(struct irq_data *d)
83{ 83{
84 unsigned int irq_nr = irq - TXX9_IRQ_BASE; 84 unsigned int irq_nr = d->irq - TXX9_IRQ_BASE;
85 u32 __iomem *ilrp = &txx9_ircptr->ilr[(irq_nr % 16) / 2]; 85 u32 __iomem *ilrp = &txx9_ircptr->ilr[(irq_nr % 16) / 2];
86 int ofs = irq_nr / 16 * 16 + (irq_nr & 1) * 8; 86 int ofs = irq_nr / 16 * 16 + (irq_nr & 1) * 8;
87 87
@@ -99,19 +99,19 @@ static inline void txx9_irq_mask(unsigned int irq)
99#endif 99#endif
100} 100}
101 101
102static void txx9_irq_mask_ack(unsigned int irq) 102static void txx9_irq_mask_ack(struct irq_data *d)
103{ 103{
104 unsigned int irq_nr = irq - TXX9_IRQ_BASE; 104 unsigned int irq_nr = d->irq - TXX9_IRQ_BASE;
105 105
106 txx9_irq_mask(irq); 106 txx9_irq_mask(d);
107 /* clear edge detection */ 107 /* clear edge detection */
108 if (unlikely(TXx9_IRCR_EDGE(txx9irq[irq_nr].mode))) 108 if (unlikely(TXx9_IRCR_EDGE(txx9irq[irq_nr].mode)))
109 __raw_writel(TXx9_IRSCR_EIClrE | irq_nr, &txx9_ircptr->scr); 109 __raw_writel(TXx9_IRSCR_EIClrE | irq_nr, &txx9_ircptr->scr);
110} 110}
111 111
112static int txx9_irq_set_type(unsigned int irq, unsigned int flow_type) 112static int txx9_irq_set_type(struct irq_data *d, unsigned int flow_type)
113{ 113{
114 unsigned int irq_nr = irq - TXX9_IRQ_BASE; 114 unsigned int irq_nr = d->irq - TXX9_IRQ_BASE;
115 u32 cr; 115 u32 cr;
116 u32 __iomem *crp; 116 u32 __iomem *crp;
117 int ofs; 117 int ofs;
@@ -139,11 +139,11 @@ static int txx9_irq_set_type(unsigned int irq, unsigned int flow_type)
139 139
140static struct irq_chip txx9_irq_chip = { 140static struct irq_chip txx9_irq_chip = {
141 .name = "TXX9", 141 .name = "TXX9",
142 .ack = txx9_irq_mask_ack, 142 .irq_ack = txx9_irq_mask_ack,
143 .mask = txx9_irq_mask, 143 .irq_mask = txx9_irq_mask,
144 .mask_ack = txx9_irq_mask_ack, 144 .irq_mask_ack = txx9_irq_mask_ack,
145 .unmask = txx9_irq_unmask, 145 .irq_unmask = txx9_irq_unmask,
146 .set_type = txx9_irq_set_type, 146 .irq_set_type = txx9_irq_set_type,
147}; 147};
148 148
149void __init txx9_irq_init(unsigned long baseaddr) 149void __init txx9_irq_init(unsigned long baseaddr)
@@ -154,8 +154,8 @@ void __init txx9_irq_init(unsigned long baseaddr)
154 for (i = 0; i < TXx9_MAX_IR; i++) { 154 for (i = 0; i < TXx9_MAX_IR; i++) {
155 txx9irq[i].level = 4; /* middle level */ 155 txx9irq[i].level = 4; /* middle level */
156 txx9irq[i].mode = TXx9_IRCR_LOW; 156 txx9irq[i].mode = TXx9_IRCR_LOW;
157 set_irq_chip_and_handler(TXX9_IRQ_BASE + i, 157 irq_set_chip_and_handler(TXX9_IRQ_BASE + i, &txx9_irq_chip,
158 &txx9_irq_chip, handle_level_irq); 158 handle_level_irq);
159 } 159 }
160 160
161 /* mask all IRC interrupts */ 161 /* mask all IRC interrupts */
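
txx9_irq_set_type() above is the controller's trigger-type hook, now operating on irq_data. A hedged, hardware-free sketch of an .irq_set_type callback that maps the generic IRQ_TYPE_* flags to a private mode value; the encodings and the omitted register write are placeholders:

#include <linux/errno.h>
#include <linux/irq.h>

static int sketch_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
        unsigned int mode;

        switch (flow_type & IRQ_TYPE_SENSE_MASK) {
        case IRQ_TYPE_EDGE_RISING:
                mode = 0x1;             /* placeholder encodings */
                break;
        case IRQ_TYPE_EDGE_FALLING:
                mode = 0x2;
                break;
        case IRQ_TYPE_LEVEL_HIGH:
                mode = 0x4;
                break;
        case IRQ_TYPE_LEVEL_LOW:
                mode = 0x8;
                break;
        default:
                return -EINVAL;
        }

        /* program_mode(d->irq, mode); hardware write omitted in this sketch */
        (void)mode;
        return 0;
}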
diff --git a/arch/mips/kernel/perf_event.c b/arch/mips/kernel/perf_event.c
index 2b7f3f703b8..a8244854d3d 100644
--- a/arch/mips/kernel/perf_event.c
+++ b/arch/mips/kernel/perf_event.c
@@ -161,41 +161,6 @@ mipspmu_event_set_period(struct perf_event *event,
161 return ret; 161 return ret;
162} 162}
163 163
164static int mipspmu_enable(struct perf_event *event)
165{
166 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
167 struct hw_perf_event *hwc = &event->hw;
168 int idx;
169 int err = 0;
170
171 /* To look for a free counter for this event. */
172 idx = mipspmu->alloc_counter(cpuc, hwc);
173 if (idx < 0) {
174 err = idx;
175 goto out;
176 }
177
178 /*
179 * If there is an event in the counter we are going to use then
180 * make sure it is disabled.
181 */
182 event->hw.idx = idx;
183 mipspmu->disable_event(idx);
184 cpuc->events[idx] = event;
185
186 /* Set the period for the event. */
187 mipspmu_event_set_period(event, hwc, idx);
188
189 /* Enable the event. */
190 mipspmu->enable_event(hwc, idx);
191
192 /* Propagate our changes to the userspace mapping. */
193 perf_event_update_userpage(event);
194
195out:
196 return err;
197}
198
199static void mipspmu_event_update(struct perf_event *event, 164static void mipspmu_event_update(struct perf_event *event,
200 struct hw_perf_event *hwc, 165 struct hw_perf_event *hwc,
201 int idx) 166 int idx)
@@ -204,7 +169,7 @@ static void mipspmu_event_update(struct perf_event *event,
204 unsigned long flags; 169 unsigned long flags;
205 int shift = 64 - TOTAL_BITS; 170 int shift = 64 - TOTAL_BITS;
206 s64 prev_raw_count, new_raw_count; 171 s64 prev_raw_count, new_raw_count;
207 s64 delta; 172 u64 delta;
208 173
209again: 174again:
210 prev_raw_count = local64_read(&hwc->prev_count); 175 prev_raw_count = local64_read(&hwc->prev_count);
@@ -231,32 +196,90 @@ again:
231 return; 196 return;
232} 197}
233 198
234static void mipspmu_disable(struct perf_event *event) 199static void mipspmu_start(struct perf_event *event, int flags)
200{
201 struct hw_perf_event *hwc = &event->hw;
202
203 if (!mipspmu)
204 return;
205
206 if (flags & PERF_EF_RELOAD)
207 WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
208
209 hwc->state = 0;
210
211 /* Set the period for the event. */
212 mipspmu_event_set_period(event, hwc, hwc->idx);
213
214 /* Enable the event. */
215 mipspmu->enable_event(hwc, hwc->idx);
216}
217
218static void mipspmu_stop(struct perf_event *event, int flags)
219{
220 struct hw_perf_event *hwc = &event->hw;
221
222 if (!mipspmu)
223 return;
224
225 if (!(hwc->state & PERF_HES_STOPPED)) {
226 /* We are working on a local event. */
227 mipspmu->disable_event(hwc->idx);
228 barrier();
229 mipspmu_event_update(event, hwc, hwc->idx);
230 hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
231 }
232}
233
234static int mipspmu_add(struct perf_event *event, int flags)
235{ 235{
236 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 236 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
237 struct hw_perf_event *hwc = &event->hw; 237 struct hw_perf_event *hwc = &event->hw;
238 int idx = hwc->idx; 238 int idx;
239 int err = 0;
239 240
241 perf_pmu_disable(event->pmu);
240 242
241 WARN_ON(idx < 0 || idx >= mipspmu->num_counters); 243 /* To look for a free counter for this event. */
244 idx = mipspmu->alloc_counter(cpuc, hwc);
245 if (idx < 0) {
246 err = idx;
247 goto out;
248 }
242 249
243 /* We are working on a local event. */ 250 /*
251 * If there is an event in the counter we are going to use then
252 * make sure it is disabled.
253 */
254 event->hw.idx = idx;
244 mipspmu->disable_event(idx); 255 mipspmu->disable_event(idx);
256 cpuc->events[idx] = event;
245 257
246 barrier(); 258 hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
247 259 if (flags & PERF_EF_START)
248 mipspmu_event_update(event, hwc, idx); 260 mipspmu_start(event, PERF_EF_RELOAD);
249 cpuc->events[idx] = NULL;
250 clear_bit(idx, cpuc->used_mask);
251 261
262 /* Propagate our changes to the userspace mapping. */
252 perf_event_update_userpage(event); 263 perf_event_update_userpage(event);
264
265out:
266 perf_pmu_enable(event->pmu);
267 return err;
253} 268}
254 269
255static void mipspmu_unthrottle(struct perf_event *event) 270static void mipspmu_del(struct perf_event *event, int flags)
256{ 271{
272 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
257 struct hw_perf_event *hwc = &event->hw; 273 struct hw_perf_event *hwc = &event->hw;
274 int idx = hwc->idx;
258 275
259 mipspmu->enable_event(hwc, hwc->idx); 276 WARN_ON(idx < 0 || idx >= mipspmu->num_counters);
277
278 mipspmu_stop(event, PERF_EF_UPDATE);
279 cpuc->events[idx] = NULL;
280 clear_bit(idx, cpuc->used_mask);
281
282 perf_event_update_userpage(event);
260} 283}
261 284
262static void mipspmu_read(struct perf_event *event) 285static void mipspmu_read(struct perf_event *event)
@@ -270,12 +293,17 @@ static void mipspmu_read(struct perf_event *event)
270 mipspmu_event_update(event, hwc, hwc->idx); 293 mipspmu_event_update(event, hwc, hwc->idx);
271} 294}
272 295
273static struct pmu pmu = { 296static void mipspmu_enable(struct pmu *pmu)
274 .enable = mipspmu_enable, 297{
275 .disable = mipspmu_disable, 298 if (mipspmu)
276 .unthrottle = mipspmu_unthrottle, 299 mipspmu->start();
277 .read = mipspmu_read, 300}
278}; 301
302static void mipspmu_disable(struct pmu *pmu)
303{
304 if (mipspmu)
305 mipspmu->stop();
306}
279 307
280static atomic_t active_events = ATOMIC_INIT(0); 308static atomic_t active_events = ATOMIC_INIT(0);
281static DEFINE_MUTEX(pmu_reserve_mutex); 309static DEFINE_MUTEX(pmu_reserve_mutex);
@@ -318,6 +346,82 @@ static void mipspmu_free_irq(void)
318 perf_irq = save_perf_irq; 346 perf_irq = save_perf_irq;
319} 347}
320 348
349/*
350 * mipsxx/rm9000/loongson2 have different performance counters, they have
351 * specific low-level init routines.
352 */
353static void reset_counters(void *arg);
354static int __hw_perf_event_init(struct perf_event *event);
355
356static void hw_perf_event_destroy(struct perf_event *event)
357{
358 if (atomic_dec_and_mutex_lock(&active_events,
359 &pmu_reserve_mutex)) {
360 /*
361 * We must not call the destroy function with interrupts
362 * disabled.
363 */
364 on_each_cpu(reset_counters,
365 (void *)(long)mipspmu->num_counters, 1);
366 mipspmu_free_irq();
367 mutex_unlock(&pmu_reserve_mutex);
368 }
369}
370
371static int mipspmu_event_init(struct perf_event *event)
372{
373 int err = 0;
374
375 switch (event->attr.type) {
376 case PERF_TYPE_RAW:
377 case PERF_TYPE_HARDWARE:
378 case PERF_TYPE_HW_CACHE:
379 break;
380
381 default:
382 return -ENOENT;
383 }
384
385 if (!mipspmu || event->cpu >= nr_cpumask_bits ||
386 (event->cpu >= 0 && !cpu_online(event->cpu)))
387 return -ENODEV;
388
389 if (!atomic_inc_not_zero(&active_events)) {
390 if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
391 atomic_dec(&active_events);
392 return -ENOSPC;
393 }
394
395 mutex_lock(&pmu_reserve_mutex);
396 if (atomic_read(&active_events) == 0)
397 err = mipspmu_get_irq();
398
399 if (!err)
400 atomic_inc(&active_events);
401 mutex_unlock(&pmu_reserve_mutex);
402 }
403
404 if (err)
405 return err;
406
407 err = __hw_perf_event_init(event);
408 if (err)
409 hw_perf_event_destroy(event);
410
411 return err;
412}
413
414static struct pmu pmu = {
415 .pmu_enable = mipspmu_enable,
416 .pmu_disable = mipspmu_disable,
417 .event_init = mipspmu_event_init,
418 .add = mipspmu_add,
419 .del = mipspmu_del,
420 .start = mipspmu_start,
421 .stop = mipspmu_stop,
422 .read = mipspmu_read,
423};
424
321static inline unsigned int 425static inline unsigned int
322mipspmu_perf_event_encode(const struct mips_perf_event *pev) 426mipspmu_perf_event_encode(const struct mips_perf_event *pev)
323{ 427{
@@ -382,8 +486,9 @@ static int validate_event(struct cpu_hw_events *cpuc,
382{ 486{
383 struct hw_perf_event fake_hwc = event->hw; 487 struct hw_perf_event fake_hwc = event->hw;
384 488
385 if (event->pmu && event->pmu != &pmu) 489 /* Allow mixed event group. So return 1 to pass validation. */
386 return 0; 490 if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF)
491 return 1;
387 492
388 return mipspmu->alloc_counter(cpuc, &fake_hwc) >= 0; 493 return mipspmu->alloc_counter(cpuc, &fake_hwc) >= 0;
389} 494}
@@ -409,73 +514,6 @@ static int validate_group(struct perf_event *event)
409 return 0; 514 return 0;
410} 515}
411 516
412/*
413 * mipsxx/rm9000/loongson2 have different performance counters, they have
414 * specific low-level init routines.
415 */
416static void reset_counters(void *arg);
417static int __hw_perf_event_init(struct perf_event *event);
418
419static void hw_perf_event_destroy(struct perf_event *event)
420{
421 if (atomic_dec_and_mutex_lock(&active_events,
422 &pmu_reserve_mutex)) {
423 /*
424 * We must not call the destroy function with interrupts
425 * disabled.
426 */
427 on_each_cpu(reset_counters,
428 (void *)(long)mipspmu->num_counters, 1);
429 mipspmu_free_irq();
430 mutex_unlock(&pmu_reserve_mutex);
431 }
432}
433
434const struct pmu *hw_perf_event_init(struct perf_event *event)
435{
436 int err = 0;
437
438 if (!mipspmu || event->cpu >= nr_cpumask_bits ||
439 (event->cpu >= 0 && !cpu_online(event->cpu)))
440 return ERR_PTR(-ENODEV);
441
442 if (!atomic_inc_not_zero(&active_events)) {
443 if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
444 atomic_dec(&active_events);
445 return ERR_PTR(-ENOSPC);
446 }
447
448 mutex_lock(&pmu_reserve_mutex);
449 if (atomic_read(&active_events) == 0)
450 err = mipspmu_get_irq();
451
452 if (!err)
453 atomic_inc(&active_events);
454 mutex_unlock(&pmu_reserve_mutex);
455 }
456
457 if (err)
458 return ERR_PTR(err);
459
460 err = __hw_perf_event_init(event);
461 if (err)
462 hw_perf_event_destroy(event);
463
464 return err ? ERR_PTR(err) : &pmu;
465}
466
467void hw_perf_enable(void)
468{
469 if (mipspmu)
470 mipspmu->start();
471}
472
473void hw_perf_disable(void)
474{
475 if (mipspmu)
476 mipspmu->stop();
477}
478
479/* This is needed by specific irq handlers in perf_event_*.c */ 517/* This is needed by specific irq handlers in perf_event_*.c */
480static void 518static void
481handle_associated_event(struct cpu_hw_events *cpuc, 519handle_associated_event(struct cpu_hw_events *cpuc,
@@ -496,21 +534,13 @@ handle_associated_event(struct cpu_hw_events *cpuc,
496#include "perf_event_mipsxx.c" 534#include "perf_event_mipsxx.c"
497 535
498/* Callchain handling code. */ 536/* Callchain handling code. */
499static inline void
500callchain_store(struct perf_callchain_entry *entry,
501 u64 ip)
502{
503 if (entry->nr < PERF_MAX_STACK_DEPTH)
504 entry->ip[entry->nr++] = ip;
505}
506 537
507/* 538/*
508 * Leave userspace callchain empty for now. When we find a way to trace 539 * Leave userspace callchain empty for now. When we find a way to trace
509 * the user stack callchains, we add here. 540 * the user stack callchains, we add here.
510 */ 541 */
511static void 542void perf_callchain_user(struct perf_callchain_entry *entry,
512perf_callchain_user(struct pt_regs *regs, 543 struct pt_regs *regs)
513 struct perf_callchain_entry *entry)
514{ 544{
515} 545}
516 546
@@ -523,23 +553,21 @@ static void save_raw_perf_callchain(struct perf_callchain_entry *entry,
523 while (!kstack_end(sp)) { 553 while (!kstack_end(sp)) {
524 addr = *sp++; 554 addr = *sp++;
525 if (__kernel_text_address(addr)) { 555 if (__kernel_text_address(addr)) {
526 callchain_store(entry, addr); 556 perf_callchain_store(entry, addr);
527 if (entry->nr >= PERF_MAX_STACK_DEPTH) 557 if (entry->nr >= PERF_MAX_STACK_DEPTH)
528 break; 558 break;
529 } 559 }
530 } 560 }
531} 561}
532 562
533static void 563void perf_callchain_kernel(struct perf_callchain_entry *entry,
534perf_callchain_kernel(struct pt_regs *regs, 564 struct pt_regs *regs)
535 struct perf_callchain_entry *entry)
536{ 565{
537 unsigned long sp = regs->regs[29]; 566 unsigned long sp = regs->regs[29];
538#ifdef CONFIG_KALLSYMS 567#ifdef CONFIG_KALLSYMS
539 unsigned long ra = regs->regs[31]; 568 unsigned long ra = regs->regs[31];
540 unsigned long pc = regs->cp0_epc; 569 unsigned long pc = regs->cp0_epc;
541 570
542 callchain_store(entry, PERF_CONTEXT_KERNEL);
543 if (raw_show_trace || !__kernel_text_address(pc)) { 571 if (raw_show_trace || !__kernel_text_address(pc)) {
544 unsigned long stack_page = 572 unsigned long stack_page =
545 (unsigned long)task_stack_page(current); 573 (unsigned long)task_stack_page(current);
@@ -549,53 +577,12 @@ perf_callchain_kernel(struct pt_regs *regs,
549 return; 577 return;
550 } 578 }
551 do { 579 do {
552 callchain_store(entry, pc); 580 perf_callchain_store(entry, pc);
553 if (entry->nr >= PERF_MAX_STACK_DEPTH) 581 if (entry->nr >= PERF_MAX_STACK_DEPTH)
554 break; 582 break;
555 pc = unwind_stack(current, &sp, pc, &ra); 583 pc = unwind_stack(current, &sp, pc, &ra);
556 } while (pc); 584 } while (pc);
557#else 585#else
558 callchain_store(entry, PERF_CONTEXT_KERNEL);
559 save_raw_perf_callchain(entry, sp); 586 save_raw_perf_callchain(entry, sp);
560#endif 587#endif
561} 588}
562
563static void
564perf_do_callchain(struct pt_regs *regs,
565 struct perf_callchain_entry *entry)
566{
567 int is_user;
568
569 if (!regs)
570 return;
571
572 is_user = user_mode(regs);
573
574 if (!current || !current->pid)
575 return;
576
577 if (is_user && current->state != TASK_RUNNING)
578 return;
579
580 if (!is_user) {
581 perf_callchain_kernel(regs, entry);
582 if (current->mm)
583 regs = task_pt_regs(current);
584 else
585 regs = NULL;
586 }
587 if (regs)
588 perf_callchain_user(regs, entry);
589}
590
591static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
592
593struct perf_callchain_entry *
594perf_callchain(struct pt_regs *regs)
595{
596 struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry);
597
598 entry->nr = 0;
599 perf_do_callchain(regs, entry);
600 return entry;
601}
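
The event_init/add/start path above is what ends up servicing a perf_event_open(2) call from user space. As a rough illustration only (a generic Linux example, not code from this patch, assuming a kernel with perf events enabled), a minimal cycle counter looks like this:

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>
#include <stdint.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count = 0;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;		/* routed to the PMU's event_init() */
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.disabled = 1;			/* start stopped, enable via ioctl */
	attr.exclude_kernel = 1;
	attr.exclude_hv = 1;

	fd = perf_event_open(&attr, 0, -1, -1, 0);	/* current task, any CPU */
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);	/* eventually reaches pmu->add()/->start() */

	for (volatile int i = 0; i < 1000000; i++)
		;				/* the work being counted */

	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);	/* pmu->stop()/->del() */
	if (read(fd, &count, sizeof(count)) != sizeof(count))
		count = 0;
	printf("cycles: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}
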
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index 183e0d22666..75266ff4cc3 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -696,7 +696,7 @@ static int mipsxx_pmu_handle_shared_irq(void)
696 * interrupt, not NMI. 696 * interrupt, not NMI.
697 */ 697 */
698 if (handled == IRQ_HANDLED) 698 if (handled == IRQ_HANDLED)
699 perf_event_do_pending(); 699 irq_work_run();
700 700
701#ifdef CONFIG_MIPS_MT_SMP 701#ifdef CONFIG_MIPS_MT_SMP
702 read_unlock(&pmuint_rwlock); 702 read_unlock(&pmuint_rwlock);
@@ -721,7 +721,7 @@ static void mipsxx_pmu_start(void)
721 721
722/* 722/*
723 * MIPS performance counters can be per-TC. The control registers can 723 * MIPS performance counters can be per-TC. The control registers can
724 * not be directly accessed accross CPUs. Hence if we want to do global 724 * not be directly accessed across CPUs. Hence if we want to do global
725 * control, we need cross CPU calls. on_each_cpu() can help us, but we 725 * control, we need cross CPU calls. on_each_cpu() can help us, but we
726 * can not make sure this function is called with interrupts enabled. So 726 * can not make sure this function is called with interrupts enabled. So
727 * here we pause local counters and then grab a rwlock and leave the 727 * here we pause local counters and then grab a rwlock and leave the
@@ -1045,6 +1045,8 @@ init_hw_perf_events(void)
1045 "CPU, irq %d%s\n", mipspmu->name, counters, irq, 1045 "CPU, irq %d%s\n", mipspmu->name, counters, irq,
1046 irq < 0 ? " (share with timer interrupt)" : ""); 1046 irq < 0 ? " (share with timer interrupt)" : "");
1047 1047
1048 perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
1049
1048 return 0; 1050 return 0;
1049} 1051}
1050early_initcall(init_hw_perf_events); 1052early_initcall(init_hw_perf_events);
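
The perf_event_do_pending() -> irq_work_run() switch above follows the generic kernel move to the irq_work framework: heavy follow-up work (wakeups and the like) is only marked pending in the overflow interrupt and run afterwards. A loose user-space analogy, using plain POSIX signals rather than the kernel's irq_work API:

#include <signal.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t work_pending;

static void overflow_handler(int sig)
{
	(void)sig;
	work_pending = 1;	/* just queue the work, like irq_work_queue() */
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = overflow_handler;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGALRM, &sa, NULL);
	alarm(1);			/* stand-in for a counter overflow interrupt */

	for (;;) {
		pause();
		if (work_pending) {	/* deferred part, like irq_work_run() */
			work_pending = 0;
			printf("running deferred work outside the handler\n");
			break;
		}
	}
	return 0;
}
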
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index ae167df73dd..d2112d3cf11 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -410,7 +410,7 @@ unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
410 if (!kallsyms_lookup_size_offset(pc, &size, &ofs)) 410 if (!kallsyms_lookup_size_offset(pc, &size, &ofs))
411 return 0; 411 return 0;
412 /* 412 /*
413 * Return ra if an exception occured at the first instruction 413 * Return ra if an exception occurred at the first instruction
414 */ 414 */
415 if (unlikely(ofs == 0)) { 415 if (unlikely(ofs == 0)) {
416 pc = *ra; 416 pc = *ra;
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index fbaabad0e6e..7f5468b38d4 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -586,6 +586,10 @@ einval: li v0, -ENOSYS
586 sys sys_fanotify_init 2 586 sys sys_fanotify_init 2
587 sys sys_fanotify_mark 6 587 sys sys_fanotify_mark 6
588 sys sys_prlimit64 4 588 sys sys_prlimit64 4
589 sys sys_name_to_handle_at 5
590 sys sys_open_by_handle_at 3 /* 4340 */
591 sys sys_clock_adjtime 2
592 sys sys_syncfs 1
589 .endm 593 .endm
590 594
591 /* We pre-compute the number of _instruction_ bytes needed to 595 /* We pre-compute the number of _instruction_ bytes needed to
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 3f417928320..a2e1fcbc41d 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -425,4 +425,8 @@ sys_call_table:
425 PTR sys_fanotify_init /* 5295 */ 425 PTR sys_fanotify_init /* 5295 */
426 PTR sys_fanotify_mark 426 PTR sys_fanotify_mark
427 PTR sys_prlimit64 427 PTR sys_prlimit64
428 PTR sys_name_to_handle_at
429 PTR sys_open_by_handle_at
430 PTR sys_clock_adjtime /* 5300 */
431 PTR sys_syncfs
428 .size sys_call_table,.-sys_call_table 432 .size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index f08ece6d8ac..b2c7624995b 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -425,4 +425,8 @@ EXPORT(sysn32_call_table)
425 PTR sys_fanotify_init /* 6300 */ 425 PTR sys_fanotify_init /* 6300 */
426 PTR sys_fanotify_mark 426 PTR sys_fanotify_mark
427 PTR sys_prlimit64 427 PTR sys_prlimit64
428 PTR sys_name_to_handle_at
429 PTR sys_open_by_handle_at
430 PTR compat_sys_clock_adjtime /* 6305 */
431 PTR sys_syncfs
428 .size sysn32_call_table,.-sysn32_call_table 432 .size sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 78d768a3e19..049a9c8c49a 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -543,4 +543,8 @@ sys_call_table:
543 PTR sys_fanotify_init 543 PTR sys_fanotify_init
544 PTR sys_32_fanotify_mark 544 PTR sys_32_fanotify_mark
545 PTR sys_prlimit64 545 PTR sys_prlimit64
546 PTR sys_name_to_handle_at
547 PTR compat_sys_open_by_handle_at /* 4340 */
548 PTR compat_sys_clock_adjtime
549 PTR sys_syncfs
546 .size sys_call_table,.-sys_call_table 550 .size sys_call_table,.-sys_call_table
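
All four ABI tables gain the same new entries (name_to_handle_at, open_by_handle_at, clock_adjtime, syncfs); each added PTR/sys line is simply the dispatch slot for one syscall number in that ABI. As a quick user-space check (generic Linux, only meaningful if the libc headers already define the number for this architecture):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	int fd = open(".", O_RDONLY);	/* any fd on the filesystem to sync */

	if (fd < 0) {
		perror("open");
		return 1;
	}
#ifdef __NR_syncfs
	if (syscall(__NR_syncfs, fd) < 0)	/* dispatched through the new table slot */
		perror("syncfs");
	else
		puts("syncfs ok");
#else
	puts("__NR_syncfs not defined by these headers");
#endif
	close(fd);
	return 0;
}
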
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 5922342bca3..dbbe0ce48d8 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -84,7 +84,7 @@ static int protected_save_fp_context(struct sigcontext __user *sc)
84 84
85static int protected_restore_fp_context(struct sigcontext __user *sc) 85static int protected_restore_fp_context(struct sigcontext __user *sc)
86{ 86{
87 int err, tmp; 87 int err, tmp __maybe_unused;
88 while (1) { 88 while (1) {
89 lock_fpu_owner(); 89 lock_fpu_owner();
90 own_fpu_inatomic(0); 90 own_fpu_inatomic(0);
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index a0ed0e052b2..aae98661379 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -115,7 +115,7 @@ static int protected_save_fp_context32(struct sigcontext32 __user *sc)
115 115
116static int protected_restore_fp_context32(struct sigcontext32 __user *sc) 116static int protected_restore_fp_context32(struct sigcontext32 __user *sc)
117{ 117{
118 int err, tmp; 118 int err, tmp __maybe_unused;
119 while (1) { 119 while (1) {
120 lock_fpu_owner(); 120 lock_fpu_owner();
121 own_fpu_inatomic(0); 121 own_fpu_inatomic(0);
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index c0e81418ba2..1ec56e635d0 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -120,7 +120,7 @@ static void vsmp_send_ipi_single(int cpu, unsigned int action)
120 120
121 local_irq_save(flags); 121 local_irq_save(flags);
122 122
123 vpflags = dvpe(); /* cant access the other CPU's registers whilst MVPE enabled */ 123 vpflags = dvpe(); /* can't access the other CPU's registers whilst MVPE enabled */
124 124
125 switch (action) { 125 switch (action) {
126 case SMP_CALL_FUNCTION: 126 case SMP_CALL_FUNCTION:
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 383aeb95cb4..32a25610108 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -193,6 +193,22 @@ void __devinit smp_prepare_boot_cpu(void)
193 */ 193 */
194static struct task_struct *cpu_idle_thread[NR_CPUS]; 194static struct task_struct *cpu_idle_thread[NR_CPUS];
195 195
196struct create_idle {
197 struct work_struct work;
198 struct task_struct *idle;
199 struct completion done;
200 int cpu;
201};
202
203static void __cpuinit do_fork_idle(struct work_struct *work)
204{
205 struct create_idle *c_idle =
206 container_of(work, struct create_idle, work);
207
208 c_idle->idle = fork_idle(c_idle->cpu);
209 complete(&c_idle->done);
210}
211
196int __cpuinit __cpu_up(unsigned int cpu) 212int __cpuinit __cpu_up(unsigned int cpu)
197{ 213{
198 struct task_struct *idle; 214 struct task_struct *idle;
@@ -203,8 +219,19 @@ int __cpuinit __cpu_up(unsigned int cpu)
203 * Linux can schedule processes on this slave. 219 * Linux can schedule processes on this slave.
204 */ 220 */
205 if (!cpu_idle_thread[cpu]) { 221 if (!cpu_idle_thread[cpu]) {
206 idle = fork_idle(cpu); 222 /*
207 cpu_idle_thread[cpu] = idle; 223 * Schedule work item to avoid forking user task
224 * Ported from arch/x86/kernel/smpboot.c
225 */
226 struct create_idle c_idle = {
227 .cpu = cpu,
228 .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
229 };
230
231 INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);
232 schedule_work(&c_idle.work);
233 wait_for_completion(&c_idle.done);
234 idle = cpu_idle_thread[cpu] = c_idle.idle;
208 235
209 if (IS_ERR(idle)) 236 if (IS_ERR(idle))
210 panic(KERN_ERR "Fork failed for CPU %d", cpu); 237 panic(KERN_ERR "Fork failed for CPU %d", cpu);
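
The __cpu_up() change above packages the fork request plus a completion on the stack, hands it to a workqueue, and sleeps until the worker signals completion, so the idle thread is never forked from a random user task's context. A simplified pthread model of that hand-off pattern (plain pthreads, not the kernel workqueue/completion API):

#include <pthread.h>
#include <stdio.h>

struct create_idle {
	int cpu;
	int result;
	pthread_mutex_t lock;
	pthread_cond_t done;
	int completed;
};

static void *worker(void *arg)
{
	struct create_idle *c = arg;

	c->result = c->cpu * 100;		/* stand-in for fork_idle(cpu) */
	pthread_mutex_lock(&c->lock);
	c->completed = 1;			/* complete(&c_idle->done) */
	pthread_cond_signal(&c->done);
	pthread_mutex_unlock(&c->lock);
	return NULL;
}

int main(void)
{
	struct create_idle c = {
		.cpu = 1,
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.done = PTHREAD_COND_INITIALIZER,
	};
	pthread_t t;

	pthread_create(&t, NULL, worker, &c);	/* schedule_work(&c_idle.work) */

	pthread_mutex_lock(&c.lock);		/* wait_for_completion(&c_idle.done) */
	while (!c.completed)
		pthread_cond_wait(&c.done, &c.lock);
	pthread_mutex_unlock(&c.lock);

	pthread_join(t, NULL);
	printf("worker result for cpu %d: %d\n", c.cpu, c.result);
	return 0;
}
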
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 39c08254b0f..5a88cc4ccd5 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -677,8 +677,9 @@ void smtc_set_irq_affinity(unsigned int irq, cpumask_t affinity)
677 */ 677 */
678} 678}
679 679
680void smtc_forward_irq(unsigned int irq) 680void smtc_forward_irq(struct irq_data *d)
681{ 681{
682 unsigned int irq = d->irq;
682 int target; 683 int target;
683 684
684 /* 685 /*
@@ -692,7 +693,7 @@ void smtc_forward_irq(unsigned int irq)
692 * and efficiency, we just pick the easiest one to find. 693 * and efficiency, we just pick the easiest one to find.
693 */ 694 */
694 695
695 target = cpumask_first(irq_desc[irq].affinity); 696 target = cpumask_first(d->affinity);
696 697
697 /* 698 /*
698 * We depend on the platform code to have correctly processed 699 * We depend on the platform code to have correctly processed
@@ -707,12 +708,10 @@ void smtc_forward_irq(unsigned int irq)
707 */ 708 */
708 709
709 /* If no one is eligible, service locally */ 710 /* If no one is eligible, service locally */
710 if (target >= NR_CPUS) { 711 if (target >= NR_CPUS)
711 do_IRQ_no_affinity(irq); 712 do_IRQ_no_affinity(irq);
712 return; 713 else
713 } 714 smtc_send_ipi(target, IRQ_AFFINITY_IPI, irq);
714
715 smtc_send_ipi(target, IRQ_AFFINITY_IPI, irq);
716} 715}
717 716
718#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */ 717#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
@@ -1147,7 +1146,7 @@ static void setup_cross_vpe_interrupts(unsigned int nvpe)
1147 1146
1148 setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ)); 1147 setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ));
1149 1148
1150 set_irq_handler(cpu_ipi_irq, handle_percpu_irq); 1149 irq_set_handler(cpu_ipi_irq, handle_percpu_irq);
1151} 1150}
1152 1151
1153/* 1152/*
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 1dc6edff45e..58beabf50b3 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -383,12 +383,11 @@ save_static_function(sys_sysmips);
383static int __used noinline 383static int __used noinline
384_sys_sysmips(nabi_no_regargs struct pt_regs regs) 384_sys_sysmips(nabi_no_regargs struct pt_regs regs)
385{ 385{
386 long cmd, arg1, arg2, arg3; 386 long cmd, arg1, arg2;
387 387
388 cmd = regs.regs[4]; 388 cmd = regs.regs[4];
389 arg1 = regs.regs[5]; 389 arg1 = regs.regs[5];
390 arg2 = regs.regs[6]; 390 arg2 = regs.regs[6];
391 arg3 = regs.regs[7];
392 391
393 switch (cmd) { 392 switch (cmd) {
394 case MIPS_ATOMIC_SET: 393 case MIPS_ATOMIC_SET:
@@ -405,7 +404,7 @@ _sys_sysmips(nabi_no_regargs struct pt_regs regs)
405 if (arg1 & 2) 404 if (arg1 & 2)
406 set_thread_flag(TIF_LOGADE); 405 set_thread_flag(TIF_LOGADE);
407 else 406 else
408 clear_thread_flag(TIF_FIXADE); 407 clear_thread_flag(TIF_LOGADE);
409 408
410 return 0; 409 return 0;
411 410
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index fb749740551..1083ad4e101 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -102,7 +102,7 @@ static __init int cpu_has_mfc0_count_bug(void)
102 case CPU_R4400SC: 102 case CPU_R4400SC:
103 case CPU_R4400MC: 103 case CPU_R4400MC:
104 /* 104 /*
105 * The published errata for the R4400 upto 3.0 say the CPU 105 * The published errata for the R4400 up to 3.0 say the CPU
106 * has the mfc0 from count bug. 106 * has the mfc0 from count bug.
107 */ 107 */
108 if ((current_cpu_data.processor_id & 0xff) <= 0x30) 108 if ((current_cpu_data.processor_id & 0xff) <= 0x30)
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index 570607b376b..832afbb8758 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -115,7 +115,7 @@ SECTIONS
115 EXIT_DATA 115 EXIT_DATA
116 } 116 }
117 117
118 PERCPU(PAGE_SIZE) 118 PERCPU(1 << CONFIG_MIPS_L1_CACHE_SHIFT, PAGE_SIZE)
119 . = ALIGN(PAGE_SIZE); 119 . = ALIGN(PAGE_SIZE);
120 __init_end = .; 120 __init_end = .;
121 /* freed after init ends here */ 121 /* freed after init ends here */
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 6a1fdfef8fd..dbb6b408f00 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -19,7 +19,7 @@
19 * VPE support module 19 * VPE support module
20 * 20 *
21 * Provides support for loading a MIPS SP program on VPE1. 21 * Provides support for loading a MIPS SP program on VPE1.
22 * The SP enviroment is rather simple, no tlb's. It needs to be relocatable 22 * The SP environment is rather simple, no tlb's. It needs to be relocatable
23 * (or partially linked). You should initialise your stack in the startup 23 * (or partially linked). You should initialise your stack in the startup
24 * code. This loader looks for the symbol __start and sets up 24 * code. This loader looks for the symbol __start and sets up
25 * execution to resume from there. The MIPS SDE kit contains suitable examples. 25 * execution to resume from there. The MIPS SDE kit contains suitable examples.
@@ -148,9 +148,9 @@ struct {
148 spinlock_t tc_list_lock; 148 spinlock_t tc_list_lock;
149 struct list_head tc_list; /* Thread contexts */ 149 struct list_head tc_list; /* Thread contexts */
150} vpecontrol = { 150} vpecontrol = {
151 .vpe_list_lock = SPIN_LOCK_UNLOCKED, 151 .vpe_list_lock = __SPIN_LOCK_UNLOCKED(vpe_list_lock),
152 .vpe_list = LIST_HEAD_INIT(vpecontrol.vpe_list), 152 .vpe_list = LIST_HEAD_INIT(vpecontrol.vpe_list),
153 .tc_list_lock = SPIN_LOCK_UNLOCKED, 153 .tc_list_lock = __SPIN_LOCK_UNLOCKED(tc_list_lock),
154 .tc_list = LIST_HEAD_INIT(vpecontrol.tc_list) 154 .tc_list = LIST_HEAD_INIT(vpecontrol.tc_list)
155}; 155};
156 156
diff --git a/arch/mips/lasat/interrupt.c b/arch/mips/lasat/interrupt.c
index 1353fb135ed..de4c165515d 100644
--- a/arch/mips/lasat/interrupt.c
+++ b/arch/mips/lasat/interrupt.c
@@ -32,24 +32,24 @@ static volatile int *lasat_int_status;
32static volatile int *lasat_int_mask; 32static volatile int *lasat_int_mask;
33static volatile int lasat_int_mask_shift; 33static volatile int lasat_int_mask_shift;
34 34
35void disable_lasat_irq(unsigned int irq_nr) 35void disable_lasat_irq(struct irq_data *d)
36{ 36{
37 irq_nr -= LASAT_IRQ_BASE; 37 unsigned int irq_nr = d->irq - LASAT_IRQ_BASE;
38
38 *lasat_int_mask &= ~(1 << irq_nr) << lasat_int_mask_shift; 39 *lasat_int_mask &= ~(1 << irq_nr) << lasat_int_mask_shift;
39} 40}
40 41
41void enable_lasat_irq(unsigned int irq_nr) 42void enable_lasat_irq(struct irq_data *d)
42{ 43{
43 irq_nr -= LASAT_IRQ_BASE; 44 unsigned int irq_nr = d->irq - LASAT_IRQ_BASE;
45
44 *lasat_int_mask |= (1 << irq_nr) << lasat_int_mask_shift; 46 *lasat_int_mask |= (1 << irq_nr) << lasat_int_mask_shift;
45} 47}
46 48
47static struct irq_chip lasat_irq_type = { 49static struct irq_chip lasat_irq_type = {
48 .name = "Lasat", 50 .name = "Lasat",
49 .ack = disable_lasat_irq, 51 .irq_mask = disable_lasat_irq,
50 .mask = disable_lasat_irq, 52 .irq_unmask = enable_lasat_irq,
51 .mask_ack = disable_lasat_irq,
52 .unmask = enable_lasat_irq,
53}; 53};
54 54
55static inline int ls1bit32(unsigned int x) 55static inline int ls1bit32(unsigned int x)
@@ -128,7 +128,7 @@ void __init arch_init_irq(void)
128 mips_cpu_irq_init(); 128 mips_cpu_irq_init();
129 129
130 for (i = LASAT_IRQ_BASE; i <= LASAT_IRQ_END; i++) 130 for (i = LASAT_IRQ_BASE; i <= LASAT_IRQ_END; i++)
131 set_irq_chip_and_handler(i, &lasat_irq_type, handle_level_irq); 131 irq_set_chip_and_handler(i, &lasat_irq_type, handle_level_irq);
132 132
133 setup_irq(LASAT_CASCADE_IRQ, &cascade); 133 setup_irq(LASAT_CASCADE_IRQ, &cascade);
134} 134}
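
This is the generic genirq conversion of this series: chip callbacks stop taking a bare IRQ number and take a struct irq_data instead, from which the number (and any per-IRQ chip data) is derived, and the old .ack/.mask/.mask_ack/.unmask set is reduced to .irq_mask/.irq_unmask. A stripped-down, compile-and-run model of just that calling convention (not kernel code; field names trimmed for illustration):

#include <stdio.h>

struct irq_data {
	unsigned int irq;	/* the only field these handlers needed before */
	void *chip_data;	/* per-IRQ controller state now travels along */
};

#define LASAT_IRQ_BASE 8

static unsigned int int_mask;	/* stand-in for *lasat_int_mask */

static void disable_lasat_irq(struct irq_data *d)
{
	unsigned int irq_nr = d->irq - LASAT_IRQ_BASE;

	int_mask &= ~(1u << irq_nr);
}

static void enable_lasat_irq(struct irq_data *d)
{
	unsigned int irq_nr = d->irq - LASAT_IRQ_BASE;

	int_mask |= 1u << irq_nr;
}

int main(void)
{
	struct irq_data d = { .irq = LASAT_IRQ_BASE + 3 };

	enable_lasat_irq(&d);
	printf("mask after enable:  0x%x\n", int_mask);
	disable_lasat_irq(&d);
	printf("mask after disable: 0x%x\n", int_mask);
	return 0;
}
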
diff --git a/arch/mips/lib/strnlen_user.S b/arch/mips/lib/strnlen_user.S
index c768e300061..64457162f7e 100644
--- a/arch/mips/lib/strnlen_user.S
+++ b/arch/mips/lib/strnlen_user.S
@@ -17,7 +17,7 @@
17 .previous 17 .previous
18 18
19/* 19/*
20 * Return the size of a string including the ending NUL character upto a 20 * Return the size of a string including the ending NUL character up to a
21 * maximum of a1 or 0 in case of error. 21 * maximum of a1 or 0 in case of error.
22 * 22 *
23 * Note: for performance reasons we deliberately accept that a user may 23 * Note: for performance reasons we deliberately accept that a user may
diff --git a/arch/mips/loongson/Kconfig b/arch/mips/loongson/Kconfig
index 6e1b77fec7e..aca93eed877 100644
--- a/arch/mips/loongson/Kconfig
+++ b/arch/mips/loongson/Kconfig
@@ -1,6 +1,7 @@
1if MACH_LOONGSON
2
1choice 3choice
2 prompt "Machine Type" 4 prompt "Machine Type"
3 depends on MACH_LOONGSON
4 5
5config LEMOTE_FULOONG2E 6config LEMOTE_FULOONG2E
6 bool "Lemote Fuloong(2e) mini-PC" 7 bool "Lemote Fuloong(2e) mini-PC"
@@ -87,3 +88,5 @@ config LOONGSON_UART_BASE
87config LOONGSON_MC146818 88config LOONGSON_MC146818
88 bool 89 bool
89 default n 90 default n
91
92endif # MACH_LOONGSON
diff --git a/arch/mips/loongson/common/bonito-irq.c b/arch/mips/loongson/common/bonito-irq.c
index 2dc2a4cc632..f27d7ccca92 100644
--- a/arch/mips/loongson/common/bonito-irq.c
+++ b/arch/mips/loongson/common/bonito-irq.c
@@ -16,24 +16,22 @@
16 16
17#include <loongson.h> 17#include <loongson.h>
18 18
19static inline void bonito_irq_enable(unsigned int irq) 19static inline void bonito_irq_enable(struct irq_data *d)
20{ 20{
21 LOONGSON_INTENSET = (1 << (irq - LOONGSON_IRQ_BASE)); 21 LOONGSON_INTENSET = (1 << (d->irq - LOONGSON_IRQ_BASE));
22 mmiowb(); 22 mmiowb();
23} 23}
24 24
25static inline void bonito_irq_disable(unsigned int irq) 25static inline void bonito_irq_disable(struct irq_data *d)
26{ 26{
27 LOONGSON_INTENCLR = (1 << (irq - LOONGSON_IRQ_BASE)); 27 LOONGSON_INTENCLR = (1 << (d->irq - LOONGSON_IRQ_BASE));
28 mmiowb(); 28 mmiowb();
29} 29}
30 30
31static struct irq_chip bonito_irq_type = { 31static struct irq_chip bonito_irq_type = {
32 .name = "bonito_irq", 32 .name = "bonito_irq",
33 .ack = bonito_irq_disable, 33 .irq_mask = bonito_irq_disable,
34 .mask = bonito_irq_disable, 34 .irq_unmask = bonito_irq_enable,
35 .mask_ack = bonito_irq_disable,
36 .unmask = bonito_irq_enable,
37}; 35};
38 36
39static struct irqaction __maybe_unused dma_timeout_irqaction = { 37static struct irqaction __maybe_unused dma_timeout_irqaction = {
@@ -46,7 +44,8 @@ void bonito_irq_init(void)
46 u32 i; 44 u32 i;
47 45
48 for (i = LOONGSON_IRQ_BASE; i < LOONGSON_IRQ_BASE + 32; i++) 46 for (i = LOONGSON_IRQ_BASE; i < LOONGSON_IRQ_BASE + 32; i++)
49 set_irq_chip_and_handler(i, &bonito_irq_type, handle_level_irq); 47 irq_set_chip_and_handler(i, &bonito_irq_type,
48 handle_level_irq);
50 49
51#ifdef CONFIG_CPU_LOONGSON2E 50#ifdef CONFIG_CPU_LOONGSON2E
52 setup_irq(LOONGSON_IRQ_BASE + 10, &dma_timeout_irqaction); 51 setup_irq(LOONGSON_IRQ_BASE + 10, &dma_timeout_irqaction);
diff --git a/arch/mips/loongson/common/cmdline.c b/arch/mips/loongson/common/cmdline.c
index 1a06defc4f7..353e1d2e41a 100644
--- a/arch/mips/loongson/common/cmdline.c
+++ b/arch/mips/loongson/common/cmdline.c
@@ -44,10 +44,5 @@ void __init prom_init_cmdline(void)
44 strcat(arcs_cmdline, " "); 44 strcat(arcs_cmdline, " ");
45 } 45 }
46 46
47 if ((strstr(arcs_cmdline, "console=")) == NULL)
48 strcat(arcs_cmdline, " console=ttyS0,115200");
49 if ((strstr(arcs_cmdline, "root=")) == NULL)
50 strcat(arcs_cmdline, " root=/dev/hda1");
51
52 prom_init_machtype(); 47 prom_init_machtype();
53} 48}
diff --git a/arch/mips/loongson/common/machtype.c b/arch/mips/loongson/common/machtype.c
index 81fbe6b73f9..2efd5d9dee2 100644
--- a/arch/mips/loongson/common/machtype.c
+++ b/arch/mips/loongson/common/machtype.c
@@ -41,7 +41,7 @@ void __weak __init mach_prom_init_machtype(void)
41 41
42void __init prom_init_machtype(void) 42void __init prom_init_machtype(void)
43{ 43{
44 char *p, str[MACHTYPE_LEN]; 44 char *p, str[MACHTYPE_LEN + 1];
45 int machtype = MACH_LEMOTE_FL2E; 45 int machtype = MACH_LEMOTE_FL2E;
46 46
47 mips_machtype = LOONGSON_MACHTYPE; 47 mips_machtype = LOONGSON_MACHTYPE;
@@ -53,6 +53,7 @@ void __init prom_init_machtype(void)
53 } 53 }
54 p += strlen("machtype="); 54 p += strlen("machtype=");
55 strncpy(str, p, MACHTYPE_LEN); 55 strncpy(str, p, MACHTYPE_LEN);
56 str[MACHTYPE_LEN] = '\0';
56 p = strstr(str, " "); 57 p = strstr(str, " ");
57 if (p) 58 if (p)
58 *p = '\0'; 59 *p = '\0';
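
The machtype change above addresses the classic strncpy pitfall: with a source string at least MACHTYPE_LEN long, strncpy() fills the whole buffer without writing a terminator, so the buffer grows by one byte and the NUL is stored explicitly. A self-contained demo of the corrected pattern (the MACHTYPE_LEN value and the command line below are made up for illustration):

#include <stdio.h>
#include <string.h>

#define MACHTYPE_LEN 50		/* hypothetical; the real value lives in a header */

int main(void)
{
	const char *p = "machtype=example-board-name some-other-arg";
	char str[MACHTYPE_LEN + 1];	/* one extra byte for the terminator */
	char *sp;

	p += strlen("machtype=");
	strncpy(str, p, MACHTYPE_LEN);	/* may use all MACHTYPE_LEN bytes... */
	str[MACHTYPE_LEN] = '\0';	/* ...so terminate explicitly, as the fix does */

	sp = strstr(str, " ");		/* cut at the first space, as in the original */
	if (sp)
		*sp = '\0';
	printf("machtype: %s\n", str);
	return 0;
}
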
diff --git a/arch/mips/math-emu/dp_fsp.c b/arch/mips/math-emu/dp_fsp.c
index 1dfbd92ba9d..daed6834dc1 100644
--- a/arch/mips/math-emu/dp_fsp.c
+++ b/arch/mips/math-emu/dp_fsp.c
@@ -62,7 +62,7 @@ ieee754dp ieee754dp_fsp(ieee754sp x)
62 break; 62 break;
63 } 63 }
64 64
65 /* CANT possibly overflow,underflow, or need rounding 65 /* CAN'T possibly overflow,underflow, or need rounding
66 */ 66 */
67 67
68 /* drop the hidden bit */ 68 /* drop the hidden bit */
diff --git a/arch/mips/math-emu/dp_mul.c b/arch/mips/math-emu/dp_mul.c
index aa566e785f5..09175f46192 100644
--- a/arch/mips/math-emu/dp_mul.c
+++ b/arch/mips/math-emu/dp_mul.c
@@ -104,7 +104,7 @@ ieee754dp ieee754dp_mul(ieee754dp x, ieee754dp y)
104 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM): 104 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM):
105 break; 105 break;
106 } 106 }
107 /* rm = xm * ym, re = xe+ye basicly */ 107 /* rm = xm * ym, re = xe+ye basically */
108 assert(xm & DP_HIDDEN_BIT); 108 assert(xm & DP_HIDDEN_BIT);
109 assert(ym & DP_HIDDEN_BIT); 109 assert(ym & DP_HIDDEN_BIT);
110 { 110 {
diff --git a/arch/mips/math-emu/dsemul.c b/arch/mips/math-emu/dsemul.c
index 36d975ae08f..3c4a8c5ba7f 100644
--- a/arch/mips/math-emu/dsemul.c
+++ b/arch/mips/math-emu/dsemul.c
@@ -32,7 +32,7 @@
32 * not change cp0_epc due to the instruction 32 * not change cp0_epc due to the instruction
33 * 33 *
34 * According to the spec: 34 * According to the spec:
35 * 1) it shouldnt be a branch :-) 35 * 1) it shouldn't be a branch :-)
36 * 2) it can be a COP instruction :-( 36 * 2) it can be a COP instruction :-(
37 * 3) if we are tring to run a protected memory space we must take 37 * 3) if we are tring to run a protected memory space we must take
38 * special care on memory access instructions :-( 38 * special care on memory access instructions :-(
diff --git a/arch/mips/math-emu/ieee754int.h b/arch/mips/math-emu/ieee754int.h
index 2701d950095..2a7d43f4f16 100644
--- a/arch/mips/math-emu/ieee754int.h
+++ b/arch/mips/math-emu/ieee754int.h
@@ -70,7 +70,7 @@
70 70
71 71
72#define COMPXSP \ 72#define COMPXSP \
73 unsigned xm; int xe; int xs; int xc 73 unsigned xm; int xe; int xs __maybe_unused; int xc
74 74
75#define COMPYSP \ 75#define COMPYSP \
76 unsigned ym; int ye; int ys; int yc 76 unsigned ym; int ye; int ys; int yc
@@ -104,7 +104,7 @@
104 104
105 105
106#define COMPXDP \ 106#define COMPXDP \
107u64 xm; int xe; int xs; int xc 107u64 xm; int xe; int xs __maybe_unused; int xc
108 108
109#define COMPYDP \ 109#define COMPYDP \
110u64 ym; int ye; int ys; int yc 110u64 ym; int ye; int ys; int yc
diff --git a/arch/mips/math-emu/sp_mul.c b/arch/mips/math-emu/sp_mul.c
index c06bb4022be..2722a2570ea 100644
--- a/arch/mips/math-emu/sp_mul.c
+++ b/arch/mips/math-emu/sp_mul.c
@@ -104,7 +104,7 @@ ieee754sp ieee754sp_mul(ieee754sp x, ieee754sp y)
104 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM): 104 case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM):
105 break; 105 break;
106 } 106 }
107 /* rm = xm * ym, re = xe+ye basicly */ 107 /* rm = xm * ym, re = xe+ye basically */
108 assert(xm & SP_HIDDEN_BIT); 108 assert(xm & SP_HIDDEN_BIT);
109 assert(ym & SP_HIDDEN_BIT); 109 assert(ym & SP_HIDDEN_BIT);
110 110
diff --git a/arch/mips/mipssim/sim_smtc.c b/arch/mips/mipssim/sim_smtc.c
index 5da30b6a65b..30df47258c2 100644
--- a/arch/mips/mipssim/sim_smtc.c
+++ b/arch/mips/mipssim/sim_smtc.c
@@ -27,6 +27,7 @@
27#include <asm/atomic.h> 27#include <asm/atomic.h>
28#include <asm/cpu.h> 28#include <asm/cpu.h>
29#include <asm/processor.h> 29#include <asm/processor.h>
30#include <asm/smtc.h>
30#include <asm/system.h> 31#include <asm/system.h>
31#include <asm/mmu_context.h> 32#include <asm/mmu_context.h>
32#include <asm/smtc_ipi.h> 33#include <asm/smtc_ipi.h>
@@ -57,8 +58,6 @@ static inline void ssmtc_send_ipi_mask(const struct cpumask *mask,
57 */ 58 */
58static void __cpuinit ssmtc_init_secondary(void) 59static void __cpuinit ssmtc_init_secondary(void)
59{ 60{
60 void smtc_init_secondary(void);
61
62 smtc_init_secondary(); 61 smtc_init_secondary();
63} 62}
64 63
diff --git a/arch/mips/mm/cex-sb1.S b/arch/mips/mm/cex-sb1.S
index 2d08268bb70..89c412bc4b6 100644
--- a/arch/mips/mm/cex-sb1.S
+++ b/arch/mips/mm/cex-sb1.S
@@ -79,7 +79,7 @@ LEAF(except_vec2_sb1)
79recovered_dcache: 79recovered_dcache:
80 /* 80 /*
81 * Unlock CacheErr-D (which in turn unlocks CacheErr-DPA). 81 * Unlock CacheErr-D (which in turn unlocks CacheErr-DPA).
82 * Ought to log the occurence of this recovered dcache error. 82 * Ought to log the occurrence of this recovered dcache error.
83 */ 83 */
84 b recovered 84 b recovered
85 mtc0 $0,C0_CERR_D 85 mtc0 $0,C0_CERR_D
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 2efcbd24c82..279599e9a77 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -324,7 +324,7 @@ int page_is_ram(unsigned long pagenr)
324void __init paging_init(void) 324void __init paging_init(void)
325{ 325{
326 unsigned long max_zone_pfns[MAX_NR_ZONES]; 326 unsigned long max_zone_pfns[MAX_NR_ZONES];
327 unsigned long lastpfn; 327 unsigned long lastpfn __maybe_unused;
328 328
329 pagetable_init(); 329 pagetable_init();
330 330
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 083d3412d0b..5ef294fbb6e 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -109,6 +109,8 @@ static bool scratchpad_available(void)
109static int scratchpad_offset(int i) 109static int scratchpad_offset(int i)
110{ 110{
111 BUG(); 111 BUG();
112 /* Really unreachable, but evidently some GCC want this. */
113 return 0;
112} 114}
113#endif 115#endif
114/* 116/*
@@ -350,7 +352,7 @@ static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p)
350 352
351/* 353/*
352 * Write random or indexed TLB entry, and care about the hazards from 354 * Write random or indexed TLB entry, and care about the hazards from
353 * the preceeding mtc0 and for the following eret. 355 * the preceding mtc0 and for the following eret.
354 */ 356 */
355enum tlb_write_entry { tlb_random, tlb_indexed }; 357enum tlb_write_entry { tlb_random, tlb_indexed };
356 358
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c
index b79b24afe3a..9027061f0ea 100644
--- a/arch/mips/mti-malta/malta-int.c
+++ b/arch/mips/mti-malta/malta-int.c
@@ -472,7 +472,7 @@ static void __init fill_ipi_map(void)
472void __init arch_init_ipiirq(int irq, struct irqaction *action) 472void __init arch_init_ipiirq(int irq, struct irqaction *action)
473{ 473{
474 setup_irq(irq, action); 474 setup_irq(irq, action);
475 set_irq_handler(irq, handle_percpu_irq); 475 irq_set_handler(irq, handle_percpu_irq);
476} 476}
477 477
478void __init arch_init_irq(void) 478void __init arch_init_irq(void)
diff --git a/arch/mips/mti-malta/malta-smtc.c b/arch/mips/mti-malta/malta-smtc.c
index 192cfd2a539..49a38b09a48 100644
--- a/arch/mips/mti-malta/malta-smtc.c
+++ b/arch/mips/mti-malta/malta-smtc.c
@@ -34,7 +34,6 @@ static void msmtc_send_ipi_mask(const struct cpumask *mask, unsigned int action)
34 */ 34 */
35static void __cpuinit msmtc_init_secondary(void) 35static void __cpuinit msmtc_init_secondary(void)
36{ 36{
37 void smtc_init_secondary(void);
38 int myvpe; 37 int myvpe;
39 38
40 /* Don't enable Malta I/O interrupts (IP2) for secondary VPEs */ 39 /* Don't enable Malta I/O interrupts (IP2) for secondary VPEs */
@@ -114,7 +113,8 @@ struct plat_smp_ops msmtc_smp_ops = {
114 */ 113 */
115 114
116 115
117int plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity) 116int plat_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity,
117 bool force)
118{ 118{
119 cpumask_t tmask; 119 cpumask_t tmask;
120 int cpu = 0; 120 int cpu = 0;
@@ -130,7 +130,7 @@ int plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
130 * cleared in the affinity mask, there will never be any 130 * cleared in the affinity mask, there will never be any
131 * interrupt forwarding. But as soon as a program or operator 131 * interrupt forwarding. But as soon as a program or operator
132 * sets affinity for one of the related IRQs, we need to make 132 * sets affinity for one of the related IRQs, we need to make
133 * sure that we don't ever try to forward across the VPE boundry, 133 * sure that we don't ever try to forward across the VPE boundary,
134 * at least not until we engineer a system where the interrupt 134 * at least not until we engineer a system where the interrupt
135 * _ack() or _end() function can somehow know that it corresponds 135 * _ack() or _end() function can somehow know that it corresponds
136 * to an interrupt taken on another VPE, and perform the appropriate 136 * to an interrupt taken on another VPE, and perform the appropriate
@@ -144,7 +144,7 @@ int plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
144 if ((cpu_data[cpu].vpe_id != 0) || !cpu_online(cpu)) 144 if ((cpu_data[cpu].vpe_id != 0) || !cpu_online(cpu))
145 cpu_clear(cpu, tmask); 145 cpu_clear(cpu, tmask);
146 } 146 }
147 cpumask_copy(irq_desc[irq].affinity, &tmask); 147 cpumask_copy(d->affinity, &tmask);
148 148
149 if (cpus_empty(tmask)) 149 if (cpus_empty(tmask))
150 /* 150 /*
@@ -155,8 +155,8 @@ int plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
155 "IRQ affinity leaves no legal CPU for IRQ %d\n", irq); 155 "IRQ affinity leaves no legal CPU for IRQ %d\n", irq);
156 156
157 /* Do any generic SMTC IRQ affinity setup */ 157 /* Do any generic SMTC IRQ affinity setup */
158 smtc_set_irq_affinity(irq, tmask); 158 smtc_set_irq_affinity(d->irq, tmask);
159 159
160 return 0; 160 return IRQ_SET_MASK_OK_NOCOPY;
161} 161}
162#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */ 162#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c
index 3c6f190aa61..1620b83cd13 100644
--- a/arch/mips/mti-malta/malta-time.c
+++ b/arch/mips/mti-malta/malta-time.c
@@ -119,7 +119,7 @@ static void __init plat_perf_setup(void)
119 set_vi_handler(cp0_perfcount_irq, mips_perf_dispatch); 119 set_vi_handler(cp0_perfcount_irq, mips_perf_dispatch);
120 mips_cpu_perf_irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq; 120 mips_cpu_perf_irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
121#ifdef CONFIG_SMP 121#ifdef CONFIG_SMP
122 set_irq_handler(mips_cpu_perf_irq, handle_percpu_irq); 122 irq_set_handler(mips_cpu_perf_irq, handle_percpu_irq);
123#endif 123#endif
124 } 124 }
125} 125}
diff --git a/arch/mips/oprofile/Makefile b/arch/mips/oprofile/Makefile
index 02cc65e52d1..4b9d7044e26 100644
--- a/arch/mips/oprofile/Makefile
+++ b/arch/mips/oprofile/Makefile
@@ -1,4 +1,4 @@
1EXTRA_CFLAGS := -Werror 1ccflags-y := -Werror
2 2
3obj-$(CONFIG_OPROFILE) += oprofile.o 3obj-$(CONFIG_OPROFILE) += oprofile.o
4 4
diff --git a/arch/mips/pci/msi-octeon.c b/arch/mips/pci/msi-octeon.c
index d8080499872..5d530f89d87 100644
--- a/arch/mips/pci/msi-octeon.c
+++ b/arch/mips/pci/msi-octeon.c
@@ -172,7 +172,7 @@ msi_irq_allocated:
172 pci_write_config_word(dev, desc->msi_attrib.pos + PCI_MSI_FLAGS, 172 pci_write_config_word(dev, desc->msi_attrib.pos + PCI_MSI_FLAGS,
173 control); 173 control);
174 174
175 set_irq_msi(irq, desc); 175 irq_set_msi_desc(irq, desc);
176 write_msi_msg(irq, &msg); 176 write_msi_msg(irq, &msg);
177 return 0; 177 return 0;
178} 178}
@@ -259,11 +259,11 @@ static DEFINE_RAW_SPINLOCK(octeon_irq_msi_lock);
259static u64 msi_rcv_reg[4]; 259static u64 msi_rcv_reg[4];
260static u64 mis_ena_reg[4]; 260static u64 mis_ena_reg[4];
261 261
262static void octeon_irq_msi_enable_pcie(unsigned int irq) 262static void octeon_irq_msi_enable_pcie(struct irq_data *data)
263{ 263{
264 u64 en; 264 u64 en;
265 unsigned long flags; 265 unsigned long flags;
266 int msi_number = irq - OCTEON_IRQ_MSI_BIT0; 266 int msi_number = data->irq - OCTEON_IRQ_MSI_BIT0;
267 int irq_index = msi_number >> 6; 267 int irq_index = msi_number >> 6;
268 int irq_bit = msi_number & 0x3f; 268 int irq_bit = msi_number & 0x3f;
269 269
@@ -275,11 +275,11 @@ static void octeon_irq_msi_enable_pcie(unsigned int irq)
275 raw_spin_unlock_irqrestore(&octeon_irq_msi_lock, flags); 275 raw_spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
276} 276}
277 277
278static void octeon_irq_msi_disable_pcie(unsigned int irq) 278static void octeon_irq_msi_disable_pcie(struct irq_data *data)
279{ 279{
280 u64 en; 280 u64 en;
281 unsigned long flags; 281 unsigned long flags;
282 int msi_number = irq - OCTEON_IRQ_MSI_BIT0; 282 int msi_number = data->irq - OCTEON_IRQ_MSI_BIT0;
283 int irq_index = msi_number >> 6; 283 int irq_index = msi_number >> 6;
284 int irq_bit = msi_number & 0x3f; 284 int irq_bit = msi_number & 0x3f;
285 285
@@ -293,11 +293,11 @@ static void octeon_irq_msi_disable_pcie(unsigned int irq)
293 293
294static struct irq_chip octeon_irq_chip_msi_pcie = { 294static struct irq_chip octeon_irq_chip_msi_pcie = {
295 .name = "MSI", 295 .name = "MSI",
296 .enable = octeon_irq_msi_enable_pcie, 296 .irq_enable = octeon_irq_msi_enable_pcie,
297 .disable = octeon_irq_msi_disable_pcie, 297 .irq_disable = octeon_irq_msi_disable_pcie,
298}; 298};
299 299
300static void octeon_irq_msi_enable_pci(unsigned int irq) 300static void octeon_irq_msi_enable_pci(struct irq_data *data)
301{ 301{
302 /* 302 /*
303 * Octeon PCI doesn't have the ability to mask/unmask MSI 303 * Octeon PCI doesn't have the ability to mask/unmask MSI
@@ -308,15 +308,15 @@ static void octeon_irq_msi_enable_pci(unsigned int irq)
308 */ 308 */
309} 309}
310 310
311static void octeon_irq_msi_disable_pci(unsigned int irq) 311static void octeon_irq_msi_disable_pci(struct irq_data *data)
312{ 312{
313 /* See comment in enable */ 313 /* See comment in enable */
314} 314}
315 315
316static struct irq_chip octeon_irq_chip_msi_pci = { 316static struct irq_chip octeon_irq_chip_msi_pci = {
317 .name = "MSI", 317 .name = "MSI",
318 .enable = octeon_irq_msi_enable_pci, 318 .irq_enable = octeon_irq_msi_enable_pci,
319 .disable = octeon_irq_msi_disable_pci, 319 .irq_disable = octeon_irq_msi_disable_pci,
320}; 320};
321 321
322/* 322/*
@@ -388,7 +388,7 @@ int __init octeon_msi_initialize(void)
388 } 388 }
389 389
390 for (irq = OCTEON_IRQ_MSI_BIT0; irq <= OCTEON_IRQ_MSI_LAST; irq++) 390 for (irq = OCTEON_IRQ_MSI_BIT0; irq <= OCTEON_IRQ_MSI_LAST; irq++)
391 set_irq_chip_and_handler(irq, msi, handle_simple_irq); 391 irq_set_chip_and_handler(irq, msi, handle_simple_irq);
392 392
393 if (octeon_has_feature(OCTEON_FEATURE_PCIE)) { 393 if (octeon_has_feature(OCTEON_FEATURE_PCIE)) {
394 if (request_irq(OCTEON_IRQ_PCI_MSI0, octeon_msi_interrupt0, 394 if (request_irq(OCTEON_IRQ_PCI_MSI0, octeon_msi_interrupt0,
diff --git a/arch/mips/pci/ops-pmcmsp.c b/arch/mips/pci/ops-pmcmsp.c
index b7c03d80c88..8fbfbf2b931 100644
--- a/arch/mips/pci/ops-pmcmsp.c
+++ b/arch/mips/pci/ops-pmcmsp.c
@@ -308,7 +308,7 @@ static struct resource pci_mem_resource = {
308 * RETURNS: PCIBIOS_SUCCESSFUL - success 308 * RETURNS: PCIBIOS_SUCCESSFUL - success
309 * 309 *
310 ****************************************************************************/ 310 ****************************************************************************/
311static int bpci_interrupt(int irq, void *dev_id) 311static irqreturn_t bpci_interrupt(int irq, void *dev_id)
312{ 312{
313 struct msp_pci_regs *preg = (void *)PCI_BASE_REG; 313 struct msp_pci_regs *preg = (void *)PCI_BASE_REG;
314 unsigned int stat = preg->if_status; 314 unsigned int stat = preg->if_status;
@@ -326,7 +326,7 @@ static int bpci_interrupt(int irq, void *dev_id)
326 /* write to clear all asserted interrupts */ 326 /* write to clear all asserted interrupts */
327 preg->if_status = stat; 327 preg->if_status = stat;
328 328
329 return PCIBIOS_SUCCESSFUL; 329 return IRQ_HANDLED;
330} 330}
331 331
332/***************************************************************************** 332/*****************************************************************************
@@ -344,7 +344,7 @@ static int bpci_interrupt(int irq, void *dev_id)
344 * PCI_ACCESS_WRITE and PCI_ACCESS_READ. 344 * PCI_ACCESS_WRITE and PCI_ACCESS_READ.
345 * 345 *
346 * bus - pointer to the bus number of the device to 346 * bus - pointer to the bus number of the device to
347 * be targetted for the configuration cycle. 347 * be targeted for the configuration cycle.
348 * The only element of the pci_bus structure 348 * The only element of the pci_bus structure
349 * used is bus->number. This argument determines 349 * used is bus->number. This argument determines
350 * if the configuration access will be Type 0 or 350 * if the configuration access will be Type 0 or
@@ -354,7 +354,7 @@ static int bpci_interrupt(int irq, void *dev_id)
354 * 354 *
355 * devfn - this is an 8-bit field. The lower three bits 355 * devfn - this is an 8-bit field. The lower three bits
356 * specify the function number of the device to 356 * specify the function number of the device to
357 * be targetted for the configuration cycle, with 357 * be targeted for the configuration cycle, with
358 * all three-bit combinations being legal. The 358 * all three-bit combinations being legal. The
359 * upper five bits specify the device number, 359 * upper five bits specify the device number,
360 * with legal values being 10 to 31. 360 * with legal values being 10 to 31.
diff --git a/arch/mips/pci/pci-bcm1480.c b/arch/mips/pci/pci-bcm1480.c
index 6f5e24c6ae6..af8c3199696 100644
--- a/arch/mips/pci/pci-bcm1480.c
+++ b/arch/mips/pci/pci-bcm1480.c
@@ -210,7 +210,7 @@ static int __init bcm1480_pcibios_init(void)
210 PCIBIOS_MIN_IO = 0x00008000UL; 210 PCIBIOS_MIN_IO = 0x00008000UL;
211 PCIBIOS_MIN_MEM = 0x01000000UL; 211 PCIBIOS_MIN_MEM = 0x01000000UL;
212 212
213 /* Set I/O resource limits. - unlimited for now to accomodate HT */ 213 /* Set I/O resource limits. - unlimited for now to accommodate HT */
214 ioport_resource.end = 0xffffffffUL; 214 ioport_resource.end = 0xffffffffUL;
215 iomem_resource.end = 0xffffffffUL; 215 iomem_resource.end = 0xffffffffUL;
216 216
diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
index 2d74fc9ae3b..ed1c54284b8 100644
--- a/arch/mips/pci/pci-octeon.c
+++ b/arch/mips/pci/pci-octeon.c
@@ -441,7 +441,7 @@ static void octeon_pci_initialize(void)
441 441
442 /* 442 /*
443 * TDOMC must be set to one in PCI mode. TDOMC should be set to 4 443 * TDOMC must be set to one in PCI mode. TDOMC should be set to 4
444 * in PCI-X mode to allow four oustanding splits. Otherwise, 444 * in PCI-X mode to allow four outstanding splits. Otherwise,
445 * should not change from its reset value. Don't write PCI_CFG19 445 * should not change from its reset value. Don't write PCI_CFG19
446 * in PCI mode (0x82000001 reset value), write it to 0x82000004 446 * in PCI mode (0x82000001 reset value), write it to 0x82000004
447 * after PCI-X mode is known. MRBCI,MDWE,MDRE -> must be zero. 447 * after PCI-X mode is known. MRBCI,MDWE,MDRE -> must be zero.
@@ -515,7 +515,7 @@ static void octeon_pci_initialize(void)
515#endif /* USE_OCTEON_INTERNAL_ARBITER */ 515#endif /* USE_OCTEON_INTERNAL_ARBITER */
516 516
517 /* 517 /*
518 * Preferrably written to 1 to set MLTD. [RDSATI,TRTAE, 518 * Preferably written to 1 to set MLTD. [RDSATI,TRTAE,
519 * TWTAE,TMAE,DPPMR -> must be zero. TILT -> must not be set to 519 * TWTAE,TMAE,DPPMR -> must be zero. TILT -> must not be set to
520 * 1..7. 520 * 1..7.
521 */ 521 */
diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c
index 38bc28005b4..33bba7bff25 100644
--- a/arch/mips/pci/pci.c
+++ b/arch/mips/pci/pci.c
@@ -125,7 +125,7 @@ void __devinit register_pci_controller(struct pci_controller *hose)
125 hose_tail = &hose->next; 125 hose_tail = &hose->next;
126 126
127 /* 127 /*
128 * Do not panic here but later - this might hapen before console init. 128 * Do not panic here but later - this might happen before console init.
129 */ 129 */
130 if (!hose->io_map_base) { 130 if (!hose->io_map_base) {
131 printk(KERN_WARNING 131 printk(KERN_WARNING
diff --git a/arch/mips/pmc-sierra/Kconfig b/arch/mips/pmc-sierra/Kconfig
index c139988bb85..bbd76082fa8 100644
--- a/arch/mips/pmc-sierra/Kconfig
+++ b/arch/mips/pmc-sierra/Kconfig
@@ -4,15 +4,11 @@ choice
4 4
5config PMC_MSP4200_EVAL 5config PMC_MSP4200_EVAL
6 bool "PMC-Sierra MSP4200 Eval Board" 6 bool "PMC-Sierra MSP4200 Eval Board"
7 select CEVT_R4K
8 select CSRC_R4K
9 select IRQ_MSP_SLP 7 select IRQ_MSP_SLP
10 select HW_HAS_PCI 8 select HW_HAS_PCI
11 9
12config PMC_MSP4200_GW 10config PMC_MSP4200_GW
13 bool "PMC-Sierra MSP4200 VoIP Gateway" 11 bool "PMC-Sierra MSP4200 VoIP Gateway"
14 select CEVT_R4K
15 select CSRC_R4K
16 select IRQ_MSP_SLP 12 select IRQ_MSP_SLP
17 select HW_HAS_PCI 13 select HW_HAS_PCI
18 14
@@ -27,6 +23,8 @@ config PMC_MSP7120_GW
27 select SYS_SUPPORTS_MULTITHREADING 23 select SYS_SUPPORTS_MULTITHREADING
28 select IRQ_MSP_CIC 24 select IRQ_MSP_CIC
29 select HW_HAS_PCI 25 select HW_HAS_PCI
26 select MSP_HAS_USB
27 select MSP_ETH
30 28
31config PMC_MSP7120_FPGA 29config PMC_MSP7120_FPGA
32 bool "PMC-Sierra MSP7120 FPGA" 30 bool "PMC-Sierra MSP7120 FPGA"
@@ -39,3 +37,16 @@ endchoice
39config HYPERTRANSPORT 37config HYPERTRANSPORT
40 bool "Hypertransport Support for PMC-Sierra Yosemite" 38 bool "Hypertransport Support for PMC-Sierra Yosemite"
41 depends on PMC_YOSEMITE 39 depends on PMC_YOSEMITE
40
41config MSP_HAS_USB
42 boolean
43 depends on PMC_MSP
44
45config MSP_ETH
46 boolean
47 select MSP_HAS_MAC
48 depends on PMC_MSP
49
50config MSP_HAS_MAC
51 boolean
52 depends on PMC_MSP
diff --git a/arch/mips/pmc-sierra/msp71xx/Makefile b/arch/mips/pmc-sierra/msp71xx/Makefile
index e107f79b149..cefba7733b7 100644
--- a/arch/mips/pmc-sierra/msp71xx/Makefile
+++ b/arch/mips/pmc-sierra/msp71xx/Makefile
@@ -6,7 +6,9 @@ obj-y += msp_prom.o msp_setup.o msp_irq.o \
6obj-$(CONFIG_HAVE_GPIO_LIB) += gpio.o gpio_extended.o 6obj-$(CONFIG_HAVE_GPIO_LIB) += gpio.o gpio_extended.o
7obj-$(CONFIG_PMC_MSP7120_GW) += msp_hwbutton.o 7obj-$(CONFIG_PMC_MSP7120_GW) += msp_hwbutton.o
8obj-$(CONFIG_IRQ_MSP_SLP) += msp_irq_slp.o 8obj-$(CONFIG_IRQ_MSP_SLP) += msp_irq_slp.o
9obj-$(CONFIG_IRQ_MSP_CIC) += msp_irq_cic.o 9obj-$(CONFIG_IRQ_MSP_CIC) += msp_irq_cic.o msp_irq_per.o
10obj-$(CONFIG_PCI) += msp_pci.o 10obj-$(CONFIG_PCI) += msp_pci.o
11obj-$(CONFIG_MSPETH) += msp_eth.o 11obj-$(CONFIG_MSP_HAS_MAC) += msp_eth.o
12obj-$(CONFIG_USB_MSP71XX) += msp_usb.o 12obj-$(CONFIG_MSP_HAS_USB) += msp_usb.o
13obj-$(CONFIG_MIPS_MT_SMP) += msp_smp.o
14obj-$(CONFIG_MIPS_MT_SMTC) += msp_smtc.o
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_eth.c b/arch/mips/pmc-sierra/msp71xx/msp_eth.c
new file mode 100644
index 00000000000..c584df393de
--- /dev/null
+++ b/arch/mips/pmc-sierra/msp71xx/msp_eth.c
@@ -0,0 +1,187 @@
1/*
2 * The setup file for ethernet related hardware on PMC-Sierra MSP processors.
3 *
4 * Copyright 2010 PMC-Sierra, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 *
11 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
12 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
14 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
15 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
16 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
17 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
18 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
19 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
20 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
21 *
22 * You should have received a copy of the GNU General Public License along
23 * with this program; if not, write to the Free Software Foundation, Inc.,
24 * 675 Mass Ave, Cambridge, MA 02139, USA.
25 */
26
27#include <linux/init.h>
28#include <linux/kernel.h>
29#include <linux/ioport.h>
30#include <linux/platform_device.h>
31#include <linux/delay.h>
32#include <msp_regs.h>
33#include <msp_int.h>
34#include <msp_gpio_macros.h>
35
36
37#define MSP_ETHERNET_GPIO0 14
38#define MSP_ETHERNET_GPIO1 15
39#define MSP_ETHERNET_GPIO2 16
40
41#ifdef CONFIG_MSP_HAS_TSMAC
42#define MSP_TSMAC_SIZE 0x10020
43#define MSP_TSMAC_ID "pmc_tsmac"
44
45static struct resource msp_tsmac0_resources[] = {
46 [0] = {
47 .start = MSP_MAC0_BASE,
48 .end = MSP_MAC0_BASE + MSP_TSMAC_SIZE - 1,
49 .flags = IORESOURCE_MEM,
50 },
51 [1] = {
52 .start = MSP_INT_MAC0,
53 .end = MSP_INT_MAC0,
54 .flags = IORESOURCE_IRQ,
55 },
56};
57
58static struct resource msp_tsmac1_resources[] = {
59 [0] = {
60 .start = MSP_MAC1_BASE,
61 .end = MSP_MAC1_BASE + MSP_TSMAC_SIZE - 1,
62 .flags = IORESOURCE_MEM,
63 },
64 [1] = {
65 .start = MSP_INT_MAC1,
66 .end = MSP_INT_MAC1,
67 .flags = IORESOURCE_IRQ,
68 },
69};
70static struct resource msp_tsmac2_resources[] = {
71 [0] = {
72 .start = MSP_MAC2_BASE,
73 .end = MSP_MAC2_BASE + MSP_TSMAC_SIZE - 1,
74 .flags = IORESOURCE_MEM,
75 },
76 [1] = {
77 .start = MSP_INT_SAR,
78 .end = MSP_INT_SAR,
79 .flags = IORESOURCE_IRQ,
80 },
81};
82
83
84static struct platform_device tsmac_device[] = {
85 [0] = {
86 .name = MSP_TSMAC_ID,
87 .id = 0,
88 .num_resources = ARRAY_SIZE(msp_tsmac0_resources),
89 .resource = msp_tsmac0_resources,
90 },
91 [1] = {
92 .name = MSP_TSMAC_ID,
93 .id = 1,
94 .num_resources = ARRAY_SIZE(msp_tsmac1_resources),
95 .resource = msp_tsmac1_resources,
96 },
97 [2] = {
98 .name = MSP_TSMAC_ID,
99 .id = 2,
100 .num_resources = ARRAY_SIZE(msp_tsmac2_resources),
101 .resource = msp_tsmac2_resources,
102 },
103};
104#define msp_eth_devs tsmac_device
105
106#else
107/* If it is not TSMAC assume MSP_ETH (100Mbps) */
108#define MSP_ETH_ID "pmc_mspeth"
109#define MSP_ETH_SIZE 0xE0
110static struct resource msp_eth0_resources[] = {
111 [0] = {
112 .start = MSP_MAC0_BASE,
113 .end = MSP_MAC0_BASE + MSP_ETH_SIZE - 1,
114 .flags = IORESOURCE_MEM,
115 },
116 [1] = {
117 .start = MSP_INT_MAC0,
118 .end = MSP_INT_MAC0,
119 .flags = IORESOURCE_IRQ,
120 },
121};
122
123static struct resource msp_eth1_resources[] = {
124 [0] = {
125 .start = MSP_MAC1_BASE,
126 .end = MSP_MAC1_BASE + MSP_ETH_SIZE - 1,
127 .flags = IORESOURCE_MEM,
128 },
129 [1] = {
130 .start = MSP_INT_MAC1,
131 .end = MSP_INT_MAC1,
132 .flags = IORESOURCE_IRQ,
133 },
134};
135
136
137
138static struct platform_device mspeth_device[] = {
139 [0] = {
140 .name = MSP_ETH_ID,
141 .id = 0,
142 .num_resources = ARRAY_SIZE(msp_eth0_resources),
143 .resource = msp_eth0_resources,
144 },
145 [1] = {
146 .name = MSP_ETH_ID,
147 .id = 1,
148 .num_resources = ARRAY_SIZE(msp_eth1_resources),
149 .resource = msp_eth1_resources,
150 },
151
152};
153#define msp_eth_devs mspeth_device
154
155#endif
156int __init msp_eth_setup(void)
157{
158 int i, ret = 0;
159
160 /* Configure the GPIO and take the ethernet PHY out of reset */
161 msp_gpio_pin_mode(MSP_GPIO_OUTPUT, MSP_ETHERNET_GPIO0);
162 msp_gpio_pin_hi(MSP_ETHERNET_GPIO0);
163
164#ifdef CONFIG_MSP_HAS_TSMAC
165 /* 3 phys on boards with TSMAC */
166 msp_gpio_pin_mode(MSP_GPIO_OUTPUT, MSP_ETHERNET_GPIO1);
167 msp_gpio_pin_hi(MSP_ETHERNET_GPIO1);
168
169 msp_gpio_pin_mode(MSP_GPIO_OUTPUT, MSP_ETHERNET_GPIO2);
170 msp_gpio_pin_hi(MSP_ETHERNET_GPIO2);
171#endif
172 for (i = 0; i < ARRAY_SIZE(msp_eth_devs); i++) {
173 ret = platform_device_register(&msp_eth_devs[i]);
174 printk(KERN_INFO "device: %d, return value = %d\n", i, ret);
175 if (ret) {
176 platform_device_unregister(&msp_eth_devs[i]);
177 break;
178 }
179 }
180
181 if (ret)
182 printk(KERN_WARNING "Could not initialize "
183 "MSPETH device structures.\n");
184
185 return ret;
186}
187subsys_initcall(msp_eth_setup);
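For reference, the registration loop in msp_eth_setup() is the standard platform-device pattern: static resource tables, one platform_device per MAC, registered at subsys_initcall time. The same thing can be expressed with platform_add_devices(), which also unregisters the devices already added if a later registration fails. A minimal sketch, with demo-eth/demo0/demo1 as placeholder names rather than MSP symbols:

/* Minimal sketch (not MSP code): register several platform devices at
 * once; platform_add_devices() unwinds on failure. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>

static struct platform_device demo0 = { .name = "demo-eth", .id = 0 };
static struct platform_device demo1 = { .name = "demo-eth", .id = 1 };

static struct platform_device *demo_devs[] __initdata = {
	&demo0,
	&demo1,
};

static int __init demo_setup(void)
{
	/* a driver binds later by matching .name against each device */
	return platform_add_devices(demo_devs, ARRAY_SIZE(demo_devs));
}
subsys_initcall(demo_setup);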
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_irq.c b/arch/mips/pmc-sierra/msp71xx/msp_irq.c
index 734d598a2e3..4531c4a514b 100644
--- a/arch/mips/pmc-sierra/msp71xx/msp_irq.c
+++ b/arch/mips/pmc-sierra/msp71xx/msp_irq.c
@@ -19,8 +19,6 @@
19 19
20#include <msp_int.h> 20#include <msp_int.h>
21 21
22extern void msp_int_handle(void);
23
24/* SLP bases systems */ 22/* SLP bases systems */
25extern void msp_slp_irq_init(void); 23extern void msp_slp_irq_init(void);
26extern void msp_slp_irq_dispatch(void); 24extern void msp_slp_irq_dispatch(void);
@@ -29,6 +27,18 @@ extern void msp_slp_irq_dispatch(void);
29extern void msp_cic_irq_init(void); 27extern void msp_cic_irq_init(void);
30extern void msp_cic_irq_dispatch(void); 28extern void msp_cic_irq_dispatch(void);
31 29
30/* VSMP support init */
31extern void msp_vsmp_int_init(void);
32
33/* vectored interrupt implementation */
34
35/* SW0/1 interrupts are used for SMP/SMTC */
36static inline void mac0_int_dispatch(void) { do_IRQ(MSP_INT_MAC0); }
37static inline void mac1_int_dispatch(void) { do_IRQ(MSP_INT_MAC1); }
38static inline void mac2_int_dispatch(void) { do_IRQ(MSP_INT_SAR); }
39static inline void usb_int_dispatch(void) { do_IRQ(MSP_INT_USB); }
40static inline void sec_int_dispatch(void) { do_IRQ(MSP_INT_SEC); }
41
32/* 42/*
33 * The PMC-Sierra MSP interrupts are arranged in a 3 level cascaded 43 * The PMC-Sierra MSP interrupts are arranged in a 3 level cascaded
34 * hierarchical system. The first level are the direct MIPS interrupts 44 * hierarchical system. The first level are the direct MIPS interrupts
@@ -96,29 +106,57 @@ asmlinkage void plat_irq_dispatch(struct pt_regs *regs)
96 do_IRQ(MSP_INT_SW1); 106 do_IRQ(MSP_INT_SW1);
97} 107}
98 108
99static struct irqaction cascade_msp = { 109static struct irqaction cic_cascade_msp = {
100 .handler = no_action, 110 .handler = no_action,
101 .name = "MSP cascade" 111 .name = "MSP CIC cascade"
102}; 112};
103 113
114static struct irqaction per_cascade_msp = {
115 .handler = no_action,
116 .name = "MSP PER cascade"
117};
104 118
105void __init arch_init_irq(void) 119void __init arch_init_irq(void)
106{ 120{
121 /* assume we'll be using vectored interrupt mode except in UP mode */
122#ifdef CONFIG_MIPS_MT
123 BUG_ON(!cpu_has_vint);
124#endif
107 /* initialize the 1st-level CPU based interrupt controller */ 125 /* initialize the 1st-level CPU based interrupt controller */
108 mips_cpu_irq_init(); 126 mips_cpu_irq_init();
109 127
110#ifdef CONFIG_IRQ_MSP_CIC 128#ifdef CONFIG_IRQ_MSP_CIC
111 msp_cic_irq_init(); 129 msp_cic_irq_init();
112 130#ifdef CONFIG_MIPS_MT
131 set_vi_handler(MSP_INT_CIC, msp_cic_irq_dispatch);
132 set_vi_handler(MSP_INT_MAC0, mac0_int_dispatch);
133 set_vi_handler(MSP_INT_MAC1, mac1_int_dispatch);
134 set_vi_handler(MSP_INT_SAR, mac2_int_dispatch);
135 set_vi_handler(MSP_INT_USB, usb_int_dispatch);
136 set_vi_handler(MSP_INT_SEC, sec_int_dispatch);
137#ifdef CONFIG_MIPS_MT_SMP
138 msp_vsmp_int_init();
139#elif defined CONFIG_MIPS_MT_SMTC
140 /* Set hwmask for all platform devices */
141 irq_hwmask[MSP_INT_MAC0] = C_IRQ0;
142 irq_hwmask[MSP_INT_MAC1] = C_IRQ1;
143 irq_hwmask[MSP_INT_USB] = C_IRQ2;
144 irq_hwmask[MSP_INT_SAR] = C_IRQ3;
145 irq_hwmask[MSP_INT_SEC] = C_IRQ5;
146
147#endif /* CONFIG_MIPS_MT_SMP */
148#endif /* CONFIG_MIPS_MT */
113 /* setup the cascaded interrupts */ 149 /* setup the cascaded interrupts */
114 setup_irq(MSP_INT_CIC, &cascade_msp); 150 setup_irq(MSP_INT_CIC, &cic_cascade_msp);
115 setup_irq(MSP_INT_PER, &cascade_msp); 151 setup_irq(MSP_INT_PER, &per_cascade_msp);
152
116#else 153#else
117 /* setup the 2nd-level SLP register based interrupt controller */ 154 /* setup the 2nd-level SLP register based interrupt controller */
155 /* VSMP/SMTC support is not enabled for SLP */
118 msp_slp_irq_init(); 156 msp_slp_irq_init();
119 157
120 /* setup the cascaded SLP/PER interrupts */ 158 /* setup the cascaded SLP/PER interrupts */
121 setup_irq(MSP_INT_SLP, &cascade_msp); 159 setup_irq(MSP_INT_SLP, &cic_cascade_msp);
122 setup_irq(MSP_INT_PER, &cascade_msp); 160 setup_irq(MSP_INT_PER, &per_cascade_msp);
123#endif 161#endif
124} 162}
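The hunk above shows the MIPS vectored-interrupt (VI) wiring: each on-chip source gets a tiny dispatch stub installed with set_vi_handler(), and the stub simply forwards the corresponding Linux IRQ to do_IRQ(). A minimal sketch of one such stub, where MY_INT_FOO is a placeholder interrupt number rather than a real MSP define:

/* Sketch only: MY_INT_FOO stands in for a real vector/IRQ number. */
static void foo_int_dispatch(void)
{
	do_IRQ(MY_INT_FOO);
}

static void __init foo_vint_setup(void)
{
	if (cpu_has_vint)	/* CPU supports vectored interrupt mode */
		set_vi_handler(MY_INT_FOO, foo_int_dispatch);
}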
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_irq_cic.c b/arch/mips/pmc-sierra/msp71xx/msp_irq_cic.c
index 07e71ff2433..c4fa2d775d8 100644
--- a/arch/mips/pmc-sierra/msp71xx/msp_irq_cic.c
+++ b/arch/mips/pmc-sierra/msp71xx/msp_irq_cic.c
@@ -1,8 +1,7 @@
1/* 1/*
2 * This file define the irq handler for MSP SLM subsystem interrupts. 2 * Copyright 2010 PMC-Sierra, Inc, derived from irq_cpu.c
3 * 3 *
4 * Copyright 2005-2007 PMC-Sierra, Inc, derived from irq_cpu.c 4 * This file define the irq handler for MSP CIC subsystem interrupts.
5 * Author: Andrew Hughes, Andrew_Hughes@pmc-sierra.com
6 * 5 *
7 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the 7 * under the terms of the GNU General Public License as published by the
@@ -16,119 +15,203 @@
16#include <linux/bitops.h> 15#include <linux/bitops.h>
17#include <linux/irq.h> 16#include <linux/irq.h>
18 17
18#include <asm/mipsregs.h>
19#include <asm/system.h> 19#include <asm/system.h>
20 20
21#include <msp_cic_int.h> 21#include <msp_cic_int.h>
22#include <msp_regs.h> 22#include <msp_regs.h>
23 23
24/* 24/*
25 * NOTE: We are only enabling support for VPE0 right now. 25 * External API
26 */ 26 */
27extern void msp_per_irq_init(void);
28extern void msp_per_irq_dispatch(void);
27 29
28static inline void unmask_msp_cic_irq(unsigned int irq) 30
31/*
32 * Convenience Macro. Should be somewhere generic.
33 */
34#define get_current_vpe() \
35 ((read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE)
36
37#ifdef CONFIG_SMP
38
39#define LOCK_VPE(flags, mtflags) \
40do { \
41 local_irq_save(flags); \
42 mtflags = dmt(); \
43} while (0)
44
45#define UNLOCK_VPE(flags, mtflags) \
46do { \
47 emt(mtflags); \
48 local_irq_restore(flags);\
49} while (0)
50
51#define LOCK_CORE(flags, mtflags) \
52do { \
53 local_irq_save(flags); \
54 mtflags = dvpe(); \
55} while (0)
56
57#define UNLOCK_CORE(flags, mtflags) \
58do { \
59 evpe(mtflags); \
60 local_irq_restore(flags);\
61} while (0)
62
63#else
64
65#define LOCK_VPE(flags, mtflags)
66#define UNLOCK_VPE(flags, mtflags)
67#endif
68
69/* ensure writes to cic are completed */
70static inline void cic_wmb(void)
29{ 71{
72 const volatile void __iomem *cic_mem = CIC_VPE0_MSK_REG;
73 volatile u32 dummy_read;
30 74
31 /* check for PER interrupt range */ 75 wmb();
32 if (irq < MSP_PER_INTBASE) 76 dummy_read = __raw_readl(cic_mem);
33 *CIC_VPE0_MSK_REG |= (1 << (irq - MSP_CIC_INTBASE)); 77 dummy_read++;
34 else
35 *PER_INT_MSK_REG |= (1 << (irq - MSP_PER_INTBASE));
36} 78}
37 79
38static inline void mask_msp_cic_irq(unsigned int irq) 80static void unmask_cic_irq(struct irq_data *d)
39{ 81{
40 /* check for PER interrupt range */ 82 volatile u32 *cic_msk_reg = CIC_VPE0_MSK_REG;
41 if (irq < MSP_PER_INTBASE) 83 int vpe;
42 *CIC_VPE0_MSK_REG &= ~(1 << (irq - MSP_CIC_INTBASE)); 84#ifdef CONFIG_SMP
43 else 85 unsigned int mtflags;
44 *PER_INT_MSK_REG &= ~(1 << (irq - MSP_PER_INTBASE)); 86 unsigned long flags;
87
88 /*
89 * Make sure we have IRQ affinity. It may have changed while
90 * we were processing the IRQ.
91 */
92 if (!cpumask_test_cpu(smp_processor_id(), d->affinity))
93 return;
94#endif
95
96 vpe = get_current_vpe();
97 LOCK_VPE(flags, mtflags);
98 cic_msk_reg[vpe] |= (1 << (d->irq - MSP_CIC_INTBASE));
99 UNLOCK_VPE(flags, mtflags);
100 cic_wmb();
45} 101}
46 102
47/* 103static void mask_cic_irq(struct irq_data *d)
48 * While we ack the interrupt interrupts are disabled and thus we don't need
49 * to deal with concurrency issues. Same for msp_cic_irq_end.
50 */
51static inline void ack_msp_cic_irq(unsigned int irq)
52{ 104{
53 mask_msp_cic_irq(irq); 105 volatile u32 *cic_msk_reg = CIC_VPE0_MSK_REG;
54 106 int vpe = get_current_vpe();
107#ifdef CONFIG_SMP
108 unsigned long flags, mtflags;
109#endif
110 LOCK_VPE(flags, mtflags);
111 cic_msk_reg[vpe] &= ~(1 << (d->irq - MSP_CIC_INTBASE));
112 UNLOCK_VPE(flags, mtflags);
113 cic_wmb();
114}
115static void msp_cic_irq_ack(struct irq_data *d)
116{
117 mask_cic_irq(d);
55 /* 118 /*
56 * only really necessary for 18, 16-14 and sometimes 3:0 (since 119 * Only really necessary for 18, 16-14 and sometimes 3:0
57 * these can be edge sensitive) but it doesn't hurt for the others. 120 * (since these can be edge sensitive) but it doesn't
58 */ 121 * hurt for the others
59 122 */
60 /* check for PER interrupt range */ 123 *CIC_STS_REG = (1 << (d->irq - MSP_CIC_INTBASE));
61 if (irq < MSP_PER_INTBASE) 124 smtc_im_ack_irq(d->irq);
62 *CIC_STS_REG = (1 << (irq - MSP_CIC_INTBASE));
63 else
64 *PER_INT_STS_REG = (1 << (irq - MSP_PER_INTBASE));
65} 125}
66 126
127/* Note: limited to VSMP; not tested with SMTC. */
128
129#ifdef CONFIG_MIPS_MT_SMP
130static int msp_cic_irq_set_affinity(struct irq_data *d,
131 const struct cpumask *cpumask, bool force)
132{
133 int cpu;
134 unsigned long flags;
135 unsigned int mtflags;
136 unsigned long imask = (1 << (d->irq - MSP_CIC_INTBASE));
137 volatile u32 *cic_mask = (volatile u32 *)CIC_VPE0_MSK_REG;
138
139 /* timer balancing should be disabled in kernel code */
140 BUG_ON(d->irq == MSP_INT_VPE0_TIMER || d->irq == MSP_INT_VPE1_TIMER);
141
142 LOCK_CORE(flags, mtflags);
143 /* enable if any of the VPE's TCs require this IRQ */
144 for_each_online_cpu(cpu) {
145 if (cpumask_test_cpu(cpu, cpumask))
146 cic_mask[cpu] |= imask;
147 else
148 cic_mask[cpu] &= ~imask;
149
150 }
151
152 UNLOCK_CORE(flags, mtflags);
153 return 0;
154
155}
156#endif
157
67static struct irq_chip msp_cic_irq_controller = { 158static struct irq_chip msp_cic_irq_controller = {
68 .name = "MSP_CIC", 159 .name = "MSP_CIC",
69 .ack = ack_msp_cic_irq, 160 .irq_mask = mask_cic_irq,
70 .mask = ack_msp_cic_irq, 161 .irq_mask_ack = msp_cic_irq_ack,
71 .mask_ack = ack_msp_cic_irq, 162 .irq_unmask = unmask_cic_irq,
72 .unmask = unmask_msp_cic_irq, 163 .irq_ack = msp_cic_irq_ack,
164#ifdef CONFIG_MIPS_MT_SMP
165 .irq_set_affinity = msp_cic_irq_set_affinity,
166#endif
73}; 167};
74 168
75
76void __init msp_cic_irq_init(void) 169void __init msp_cic_irq_init(void)
77{ 170{
78 int i; 171 int i;
79
80 /* Mask/clear interrupts. */ 172 /* Mask/clear interrupts. */
81 *CIC_VPE0_MSK_REG = 0x00000000; 173 *CIC_VPE0_MSK_REG = 0x00000000;
82 *PER_INT_MSK_REG = 0x00000000; 174 *CIC_VPE1_MSK_REG = 0x00000000;
83 *CIC_STS_REG = 0xFFFFFFFF; 175 *CIC_STS_REG = 0xFFFFFFFF;
84 *PER_INT_STS_REG = 0xFFFFFFFF;
85
86#if defined(CONFIG_PMC_MSP7120_GW) || \
87 defined(CONFIG_PMC_MSP7120_EVAL)
88 /* 176 /*
89 * The MSP7120 RG and EVBD boards use IRQ[6:4] for PCI. 177 * The MSP7120 RG and EVBD boards use IRQ[6:4] for PCI.
90 * These inputs map to EXT_INT_POL[6:4] inside the CIC. 178 * These inputs map to EXT_INT_POL[6:4] inside the CIC.
91 * They are to be active low, level sensitive. 179 * They are to be active low, level sensitive.
92 */ 180 */
93 *CIC_EXT_CFG_REG &= 0xFFFF8F8F; 181 *CIC_EXT_CFG_REG &= 0xFFFF8F8F;
94#endif
95 182
96 /* initialize all the IRQ descriptors */ 183 /* initialize all the IRQ descriptors */
97 for (i = MSP_CIC_INTBASE; i < MSP_PER_INTBASE + 32; i++) 184 for (i = MSP_CIC_INTBASE ; i < MSP_CIC_INTBASE + 32 ; i++) {
98 set_irq_chip_and_handler(i, &msp_cic_irq_controller, 185 irq_set_chip_and_handler(i, &msp_cic_irq_controller,
99 handle_level_irq); 186 handle_level_irq);
187#ifdef CONFIG_MIPS_MT_SMTC
188 /* Mask of CIC interrupt */
189 irq_hwmask[i] = C_IRQ4;
190#endif
191 }
192
193 /* Initialize the PER interrupt sub-system */
194 msp_per_irq_init();
100} 195}
101 196
197/* CIC masked by CIC vector processing before dispatch called */
102void msp_cic_irq_dispatch(void) 198void msp_cic_irq_dispatch(void)
103{ 199{
104 u32 pending; 200 volatile u32 *cic_msk_reg = (volatile u32 *)CIC_VPE0_MSK_REG;
105 int intbase; 201 u32 cic_mask;
106 202 u32 pending;
107 intbase = MSP_CIC_INTBASE; 203 int cic_status = *CIC_STS_REG;
108 pending = *CIC_STS_REG & *CIC_VPE0_MSK_REG; 204 cic_mask = cic_msk_reg[get_current_vpe()];
109 205 pending = cic_status & cic_mask;
110 /* check for PER interrupt */ 206 if (pending & (1 << (MSP_INT_VPE0_TIMER - MSP_CIC_INTBASE))) {
111 if (pending == (1 << (MSP_INT_PER - MSP_CIC_INTBASE))) {
112 intbase = MSP_PER_INTBASE;
113 pending = *PER_INT_STS_REG & *PER_INT_MSK_REG;
114 }
115
116 /* check for spurious interrupt */
117 if (pending == 0x00000000) {
118 printk(KERN_ERR
119 "Spurious %s interrupt? status %08x, mask %08x\n",
120 (intbase == MSP_CIC_INTBASE) ? "CIC" : "PER",
121 (intbase == MSP_CIC_INTBASE) ?
122 *CIC_STS_REG : *PER_INT_STS_REG,
123 (intbase == MSP_CIC_INTBASE) ?
124 *CIC_VPE0_MSK_REG : *PER_INT_MSK_REG);
125 return;
126 }
127
128 /* check for the timer and dispatch it first */
129 if ((intbase == MSP_CIC_INTBASE) &&
130 (pending & (1 << (MSP_INT_VPE0_TIMER - MSP_CIC_INTBASE))))
131 do_IRQ(MSP_INT_VPE0_TIMER); 207 do_IRQ(MSP_INT_VPE0_TIMER);
132 else 208 } else if (pending & (1 << (MSP_INT_VPE1_TIMER - MSP_CIC_INTBASE))) {
133 do_IRQ(ffs(pending) + intbase - 1); 209 do_IRQ(MSP_INT_VPE1_TIMER);
210 } else if (pending & (1 << (MSP_INT_PER - MSP_CIC_INTBASE))) {
211 msp_per_irq_dispatch();
212 } else if (pending) {
213 do_IRQ(ffs(pending) + MSP_CIC_INTBASE - 1);
214 } else {
215 spurious_interrupt();
216 }
134} 217}
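The rewritten msp_cic_irq_dispatch() above follows a common cascaded-dispatch shape: read the status register, AND it with the per-VPE mask, service the high-priority sources (the timers) first, then fall back to the lowest pending bit, and treat an empty result as spurious. Sketched generically below; the MY_* constants and the my_read_status()/my_read_mask() accessors are placeholders, not MSP code:

/* Sketch of the cascaded-dispatch shape: pending = status & mask,
 * timers first, then the lowest set bit, else spurious. */
static void my_cascade_dispatch(void)
{
	u32 pending = my_read_status() & my_read_mask();	/* hypothetical accessors */

	if (pending & (1 << (MY_INT_TIMER - MY_INTBASE)))
		do_IRQ(MY_INT_TIMER);			/* service the timer first */
	else if (pending)
		do_IRQ(ffs(pending) + MY_INTBASE - 1);	/* ffs() is 1-based */
	else
		spurious_interrupt();
}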
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_irq_per.c b/arch/mips/pmc-sierra/msp71xx/msp_irq_per.c
new file mode 100644
index 00000000000..f9b9dcdfa9d
--- /dev/null
+++ b/arch/mips/pmc-sierra/msp71xx/msp_irq_per.c
@@ -0,0 +1,135 @@
1/*
2 * Copyright 2010 PMC-Sierra, Inc, derived from irq_cpu.c
3 *
4 * This file define the irq handler for MSP PER subsystem interrupts.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12#include <linux/init.h>
13#include <linux/interrupt.h>
14#include <linux/kernel.h>
15#include <linux/spinlock.h>
16#include <linux/bitops.h>
17
18#include <asm/mipsregs.h>
19#include <asm/system.h>
20
21#include <msp_cic_int.h>
22#include <msp_regs.h>
23
24
25/*
26 * Convenience Macro. Should be somewhere generic.
27 */
28#define get_current_vpe() \
29 ((read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE)
30
31#ifdef CONFIG_SMP
32/*
33 * The PER registers must be protected from concurrent access.
34 */
35
36static DEFINE_SPINLOCK(per_lock);
37#endif
38
39/* ensure writes to per are completed */
40
41static inline void per_wmb(void)
42{
43 const volatile void __iomem *per_mem = PER_INT_MSK_REG;
44 volatile u32 dummy_read;
45
46 wmb();
47 dummy_read = __raw_readl(per_mem);
48 dummy_read++;
49}
50
51static inline void unmask_per_irq(struct irq_data *d)
52{
53#ifdef CONFIG_SMP
54 unsigned long flags;
55 spin_lock_irqsave(&per_lock, flags);
56 *PER_INT_MSK_REG |= (1 << (d->irq - MSP_PER_INTBASE));
57 spin_unlock_irqrestore(&per_lock, flags);
58#else
59 *PER_INT_MSK_REG |= (1 << (d->irq - MSP_PER_INTBASE));
60#endif
61 per_wmb();
62}
63
64static inline void mask_per_irq(struct irq_data *d)
65{
66#ifdef CONFIG_SMP
67 unsigned long flags;
68 spin_lock_irqsave(&per_lock, flags);
69 *PER_INT_MSK_REG &= ~(1 << (d->irq - MSP_PER_INTBASE));
70 spin_unlock_irqrestore(&per_lock, flags);
71#else
72 *PER_INT_MSK_REG &= ~(1 << (d->irq - MSP_PER_INTBASE));
73#endif
74 per_wmb();
75}
76
77static inline void msp_per_irq_ack(struct irq_data *d)
78{
79 mask_per_irq(d);
80 /*
81 * In the PER interrupt controller, only bits 11 and 10
82 * are write-to-clear (SPI TX complete, SPI RX complete);
83 * writes to the other bits have no effect.
84 */
85 *PER_INT_STS_REG = (1 << (d->irq - MSP_PER_INTBASE));
86}
87
88#ifdef CONFIG_SMP
89static int msp_per_irq_set_affinity(struct irq_data *d,
90 const struct cpumask *affinity, bool force)
91{
92 /* WTF is this doing ????? */
93 unmask_per_irq(d);
94 return 0;
95}
96#endif
97
98static struct irq_chip msp_per_irq_controller = {
99 .name = "MSP_PER",
100 .irq_enable = unmask_per_irq,
101 .irq_disable = mask_per_irq,
102 .irq_ack = msp_per_irq_ack,
103#ifdef CONFIG_SMP
104 .irq_set_affinity = msp_per_irq_set_affinity,
105#endif
106};
107
108void __init msp_per_irq_init(void)
109{
110 int i;
111 /* Mask/clear interrupts. */
112 *PER_INT_MSK_REG = 0x00000000;
113 *PER_INT_STS_REG = 0xFFFFFFFF;
114 /* initialize all the IRQ descriptors */
115 for (i = MSP_PER_INTBASE; i < MSP_PER_INTBASE + 32; i++) {
116 irq_set_chip(i, &msp_per_irq_controller);
117#ifdef CONFIG_MIPS_MT_SMTC
118 irq_hwmask[i] = C_IRQ4;
119#endif
120 }
121}
122
123void msp_per_irq_dispatch(void)
124{
125 u32 per_mask = *PER_INT_MSK_REG;
126 u32 per_status = *PER_INT_STS_REG;
127 u32 pending;
128
129 pending = per_status & per_mask;
130 if (pending) {
131 do_IRQ(ffs(pending) + MSP_PER_INTBASE - 1);
132 } else {
133 spurious_interrupt();
134 }
135}
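Both cic_wmb() above and per_wmb() in this file use the same posted-write flush idiom: order the store with wmb(), then read the device register back so the write cannot linger in a CPU or bus write buffer. In generic form (any already-mapped MMIO register works as the readback target):

/* Generic form of the cic_wmb()/per_wmb() flush idiom. */
static inline void mmio_write_flush(const volatile void __iomem *reg)
{
	wmb();				/* order the preceding store */
	(void)__raw_readl(reg);		/* dummy readback forces it to the device */
}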
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_irq_slp.c b/arch/mips/pmc-sierra/msp71xx/msp_irq_slp.c
index 61f39023234..5bbcc47da6b 100644
--- a/arch/mips/pmc-sierra/msp71xx/msp_irq_slp.c
+++ b/arch/mips/pmc-sierra/msp71xx/msp_irq_slp.c
@@ -21,8 +21,10 @@
21#include <msp_slp_int.h> 21#include <msp_slp_int.h>
22#include <msp_regs.h> 22#include <msp_regs.h>
23 23
24static inline void unmask_msp_slp_irq(unsigned int irq) 24static inline void unmask_msp_slp_irq(struct irq_data *d)
25{ 25{
26 unsigned int irq = d->irq;
27
26 /* check for PER interrupt range */ 28 /* check for PER interrupt range */
27 if (irq < MSP_PER_INTBASE) 29 if (irq < MSP_PER_INTBASE)
28 *SLP_INT_MSK_REG |= (1 << (irq - MSP_SLP_INTBASE)); 30 *SLP_INT_MSK_REG |= (1 << (irq - MSP_SLP_INTBASE));
@@ -30,8 +32,10 @@ static inline void unmask_msp_slp_irq(unsigned int irq)
30 *PER_INT_MSK_REG |= (1 << (irq - MSP_PER_INTBASE)); 32 *PER_INT_MSK_REG |= (1 << (irq - MSP_PER_INTBASE));
31} 33}
32 34
33static inline void mask_msp_slp_irq(unsigned int irq) 35static inline void mask_msp_slp_irq(struct irq_data *d)
34{ 36{
37 unsigned int irq = d->irq;
38
35 /* check for PER interrupt range */ 39 /* check for PER interrupt range */
36 if (irq < MSP_PER_INTBASE) 40 if (irq < MSP_PER_INTBASE)
37 *SLP_INT_MSK_REG &= ~(1 << (irq - MSP_SLP_INTBASE)); 41 *SLP_INT_MSK_REG &= ~(1 << (irq - MSP_SLP_INTBASE));
@@ -43,8 +47,10 @@ static inline void mask_msp_slp_irq(unsigned int irq)
43 * While we ack the interrupt interrupts are disabled and thus we don't need 47 * While we ack the interrupt interrupts are disabled and thus we don't need
44 * to deal with concurrency issues. Same for msp_slp_irq_end. 48 * to deal with concurrency issues. Same for msp_slp_irq_end.
45 */ 49 */
46static inline void ack_msp_slp_irq(unsigned int irq) 50static inline void ack_msp_slp_irq(struct irq_data *d)
47{ 51{
52 unsigned int irq = d->irq;
53
48 /* check for PER interrupt range */ 54 /* check for PER interrupt range */
49 if (irq < MSP_PER_INTBASE) 55 if (irq < MSP_PER_INTBASE)
50 *SLP_INT_STS_REG = (1 << (irq - MSP_SLP_INTBASE)); 56 *SLP_INT_STS_REG = (1 << (irq - MSP_SLP_INTBASE));
@@ -54,9 +60,9 @@ static inline void ack_msp_slp_irq(unsigned int irq)
54 60
55static struct irq_chip msp_slp_irq_controller = { 61static struct irq_chip msp_slp_irq_controller = {
56 .name = "MSP_SLP", 62 .name = "MSP_SLP",
57 .ack = ack_msp_slp_irq, 63 .irq_ack = ack_msp_slp_irq,
58 .mask = mask_msp_slp_irq, 64 .irq_mask = mask_msp_slp_irq,
59 .unmask = unmask_msp_slp_irq, 65 .irq_unmask = unmask_msp_slp_irq,
60}; 66};
61 67
62void __init msp_slp_irq_init(void) 68void __init msp_slp_irq_init(void)
@@ -71,7 +77,7 @@ void __init msp_slp_irq_init(void)
71 77
72 /* initialize all the IRQ descriptors */ 78 /* initialize all the IRQ descriptors */
73 for (i = MSP_SLP_INTBASE; i < MSP_PER_INTBASE + 32; i++) 79 for (i = MSP_SLP_INTBASE; i < MSP_PER_INTBASE + 32; i++)
74 set_irq_chip_and_handler(i, &msp_slp_irq_controller, 80 irq_set_chip_and_handler(i, &msp_slp_irq_controller,
75 handle_level_irq); 81 handle_level_irq);
76} 82}
77 83
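The SLP conversion above, like the CIC, PER, pnx833x, pnx8550, powertv, rb532 and ip22 changes in this series, follows the same genirq migration: the callbacks take a struct irq_data instead of a bare IRQ number (the number is now d->irq), the irq_chip fields gain an irq_ prefix, and descriptors are registered with irq_set_chip_and_handler(). A condensed, self-contained sketch of the resulting shape, with my_* names and MY_IRQ_BASE as placeholders and a shadow variable standing in for a real controller mask register:

/* Condensed sketch of the irq_chip shape this series converts to. */
#include <linux/init.h>
#include <linux/irq.h>

#define MY_IRQ_BASE	40		/* placeholder base, not a platform value */

static u32 my_shadow_mask;		/* stands in for a controller mask register */

static void my_mask(struct irq_data *d)
{
	/* the IRQ number the old .mask callback received is now d->irq */
	my_shadow_mask &= ~(1 << (d->irq - MY_IRQ_BASE));
}

static void my_unmask(struct irq_data *d)
{
	my_shadow_mask |= 1 << (d->irq - MY_IRQ_BASE);
}

static struct irq_chip my_irq_chip = {
	.name		= "MY_CHIP",
	.irq_mask	= my_mask,
	.irq_unmask	= my_unmask,
};

static void __init my_irq_init(void)
{
	int i;

	for (i = MY_IRQ_BASE; i < MY_IRQ_BASE + 32; i++)
		irq_set_chip_and_handler(i, &my_irq_chip, handle_level_irq);
}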
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_setup.c b/arch/mips/pmc-sierra/msp71xx/msp_setup.c
index a54e85b3cf2..2413ea67877 100644
--- a/arch/mips/pmc-sierra/msp71xx/msp_setup.c
+++ b/arch/mips/pmc-sierra/msp71xx/msp_setup.c
@@ -146,6 +146,8 @@ void __init plat_mem_setup(void)
146 pm_power_off = msp_power_off; 146 pm_power_off = msp_power_off;
147} 147}
148 148
149extern struct plat_smp_ops msp_smtc_smp_ops;
150
149void __init prom_init(void) 151void __init prom_init(void)
150{ 152{
151 unsigned long family; 153 unsigned long family;
@@ -226,10 +228,18 @@ void __init prom_init(void)
226 */ 228 */
227 msp_serial_setup(); 229 msp_serial_setup();
228 230
231#ifdef CONFIG_MIPS_MT_SMP
232 register_smp_ops(&vsmp_smp_ops);
233#endif
234
235#ifdef CONFIG_MIPS_MT_SMTC
236 register_smp_ops(&msp_smtc_smp_ops);
237#endif
238
229#ifdef CONFIG_PMCTWILED 239#ifdef CONFIG_PMCTWILED
230 /* 240 /*
231 * Setup LED states before the subsys_initcall loads other 241 * Setup LED states before the subsys_initcall loads other
232 * dependant drivers/modules. 242 * dependent drivers/modules.
233 */ 243 */
234 pmctwiled_setup(); 244 pmctwiled_setup();
235#endif 245#endif
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_smp.c b/arch/mips/pmc-sierra/msp71xx/msp_smp.c
new file mode 100644
index 00000000000..bec17901ff0
--- /dev/null
+++ b/arch/mips/pmc-sierra/msp71xx/msp_smp.c
@@ -0,0 +1,77 @@
1/*
2 * Copyright (C) 2000, 2001, 2004 MIPS Technologies, Inc.
3 * Copyright (C) 2001 Ralf Baechle
4 * Copyright (C) 2010 PMC-Sierra, Inc.
5 *
6 * VSMP support for MSP platforms. Derived from Malta VSMP support.
7 *
8 * This program is free software; you can distribute it and/or modify it
9 * under the terms of the GNU General Public License (Version 2) as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 * for more details.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
20 *
21 */
22#include <linux/smp.h>
23#include <linux/interrupt.h>
24
25#ifdef CONFIG_MIPS_MT_SMP
26#define MIPS_CPU_IPI_RESCHED_IRQ 0 /* SW int 0 for resched */
27#define MIPS_CPU_IPI_CALL_IRQ 1 /* SW int 1 for call */
28
29
30static void ipi_resched_dispatch(void)
31{
32 do_IRQ(MIPS_CPU_IPI_RESCHED_IRQ);
33}
34
35static void ipi_call_dispatch(void)
36{
37 do_IRQ(MIPS_CPU_IPI_CALL_IRQ);
38}
39
40static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
41{
42 return IRQ_HANDLED;
43}
44
45static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
46{
47 smp_call_function_interrupt();
48
49 return IRQ_HANDLED;
50}
51
52static struct irqaction irq_resched = {
53 .handler = ipi_resched_interrupt,
54 .flags = IRQF_DISABLED | IRQF_PERCPU,
55 .name = "IPI_resched"
56};
57
58static struct irqaction irq_call = {
59 .handler = ipi_call_interrupt,
60 .flags = IRQF_DISABLED | IRQF_PERCPU,
61 .name = "IPI_call"
62};
63
64void __init arch_init_ipiirq(int irq, struct irqaction *action)
65{
66 setup_irq(irq, action);
67 irq_set_handler(irq, handle_percpu_irq);
68}
69
70void __init msp_vsmp_int_init(void)
71{
72 set_vi_handler(MIPS_CPU_IPI_RESCHED_IRQ, ipi_resched_dispatch);
73 set_vi_handler(MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch);
74 arch_init_ipiirq(MIPS_CPU_IPI_RESCHED_IRQ, &irq_resched);
75 arch_init_ipiirq(MIPS_CPU_IPI_CALL_IRQ, &irq_call);
76}
77#endif /* CONFIG_MIPS_MT_SMP */
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_smtc.c b/arch/mips/pmc-sierra/msp71xx/msp_smtc.c
new file mode 100644
index 00000000000..c8dcc1c01e1
--- /dev/null
+++ b/arch/mips/pmc-sierra/msp71xx/msp_smtc.c
@@ -0,0 +1,105 @@
1/*
2 * MSP71xx Platform-specific hooks for SMP operation
3 */
4#include <linux/irq.h>
5#include <linux/init.h>
6
7#include <asm/mipsmtregs.h>
8#include <asm/mipsregs.h>
9#include <asm/smtc.h>
10#include <asm/smtc_ipi.h>
11
12/* VPE/SMP Prototype implements platform interfaces directly */
13
14/*
15 * Cause the specified action to be performed on a targeted "CPU"
16 */
17
18static void msp_smtc_send_ipi_single(int cpu, unsigned int action)
19{
20 /* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */
21 smtc_send_ipi(cpu, LINUX_SMP_IPI, action);
22}
23
24static void msp_smtc_send_ipi_mask(const struct cpumask *mask,
25 unsigned int action)
26{
27 unsigned int i;
28
29 for_each_cpu(i, mask)
30 msp_smtc_send_ipi_single(i, action);
31}
32
33/*
34 * Post-config but pre-boot cleanup entry point
35 */
36static void __cpuinit msp_smtc_init_secondary(void)
37{
38 int myvpe;
39
40 /* Don't enable I/O interrupts (IP2) for secondary VPEs */
41 myvpe = read_c0_tcbind() & TCBIND_CURVPE;
42 if (myvpe > 0)
43 change_c0_status(ST0_IM, STATUSF_IP0 | STATUSF_IP1 |
44 STATUSF_IP6 | STATUSF_IP7);
45 smtc_init_secondary();
46}
47
48/*
49 * Platform "CPU" startup hook
50 */
51static void __cpuinit msp_smtc_boot_secondary(int cpu,
52 struct task_struct *idle)
53{
54 smtc_boot_secondary(cpu, idle);
55}
56
57/*
58 * SMP initialization finalization entry point
59 */
60static void __cpuinit msp_smtc_smp_finish(void)
61{
62 smtc_smp_finish();
63}
64
65/*
66 * Hook for after all CPUs are online
67 */
68
69static void msp_smtc_cpus_done(void)
70{
71}
72
73/*
74 * Platform SMP pre-initialization
75 *
76 * As noted above, we can assume a single CPU for now
77 * but it may be multithreaded.
78 */
79
80static void __init msp_smtc_smp_setup(void)
81{
82 /*
83 * we won't get the definitive value until
84 * we've run smtc_prepare_cpus later.
85 */
86
87 if (read_c0_config3() & (1 << 2))
88 smp_num_siblings = smtc_build_cpu_map(0);
89}
90
91static void __init msp_smtc_prepare_cpus(unsigned int max_cpus)
92{
93 smtc_prepare_cpus(max_cpus);
94}
95
96struct plat_smp_ops msp_smtc_smp_ops = {
97 .send_ipi_single = msp_smtc_send_ipi_single,
98 .send_ipi_mask = msp_smtc_send_ipi_mask,
99 .init_secondary = msp_smtc_init_secondary,
100 .smp_finish = msp_smtc_smp_finish,
101 .cpus_done = msp_smtc_cpus_done,
102 .boot_secondary = msp_smtc_boot_secondary,
103 .smp_setup = msp_smtc_smp_setup,
104 .prepare_cpus = msp_smtc_prepare_cpus,
105};
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_time.c b/arch/mips/pmc-sierra/msp71xx/msp_time.c
index cca64e15f57..8b42f307a7a 100644
--- a/arch/mips/pmc-sierra/msp71xx/msp_time.c
+++ b/arch/mips/pmc-sierra/msp71xx/msp_time.c
@@ -29,6 +29,7 @@
29#include <linux/module.h> 29#include <linux/module.h>
30#include <linux/ptrace.h> 30#include <linux/ptrace.h>
31 31
32#include <asm/cevt-r4k.h>
32#include <asm/mipsregs.h> 33#include <asm/mipsregs.h>
33#include <asm/time.h> 34#include <asm/time.h>
34 35
@@ -36,6 +37,12 @@
36#include <msp_int.h> 37#include <msp_int.h>
37#include <msp_regs.h> 38#include <msp_regs.h>
38 39
40#define get_current_vpe() \
41 ((read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE)
42
43static struct irqaction timer_vpe1;
44static int tim_installed;
45
39void __init plat_time_init(void) 46void __init plat_time_init(void)
40{ 47{
41 char *endp, *s; 48 char *endp, *s;
@@ -81,7 +88,14 @@ void __init plat_time_init(void)
81 mips_hpt_frequency = cpu_rate/2; 88 mips_hpt_frequency = cpu_rate/2;
82} 89}
83 90
84unsigned int __init get_c0_compare_int(void) 91unsigned int __cpuinit get_c0_compare_int(void)
85{ 92{
86 return MSP_INT_VPE0_TIMER; 93 /* MIPS_MT modes may want timer for second VPE */
94 if ((get_current_vpe()) && !tim_installed) {
95 memcpy(&timer_vpe1, &c0_compare_irqaction, sizeof(timer_vpe1));
96 setup_irq(MSP_INT_VPE1_TIMER, &timer_vpe1);
97 tim_installed++;
98 }
99
100 return get_current_vpe() ? MSP_INT_VPE1_TIMER : MSP_INT_VPE0_TIMER;
87} 101}
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_usb.c b/arch/mips/pmc-sierra/msp71xx/msp_usb.c
index 0ee01e359dd..9a1aef89bd4 100644
--- a/arch/mips/pmc-sierra/msp71xx/msp_usb.c
+++ b/arch/mips/pmc-sierra/msp71xx/msp_usb.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * The setup file for USB related hardware on PMC-Sierra MSP processors. 2 * The setup file for USB related hardware on PMC-Sierra MSP processors.
3 * 3 *
4 * Copyright 2006-2007 PMC-Sierra, Inc. 4 * Copyright 2006 PMC-Sierra, Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the 7 * under the terms of the GNU General Public License as published by the
@@ -23,8 +23,8 @@
23 * with this program; if not, write to the Free Software Foundation, Inc., 23 * with this program; if not, write to the Free Software Foundation, Inc.,
24 * 675 Mass Ave, Cambridge, MA 02139, USA. 24 * 675 Mass Ave, Cambridge, MA 02139, USA.
25 */ 25 */
26#if defined(CONFIG_USB_EHCI_HCD) || defined(CONFIG_USB_GADGET)
26 27
27#include <linux/dma-mapping.h>
28#include <linux/init.h> 28#include <linux/init.h>
29#include <linux/ioport.h> 29#include <linux/ioport.h>
30#include <linux/platform_device.h> 30#include <linux/platform_device.h>
@@ -34,40 +34,56 @@
34#include <msp_regs.h> 34#include <msp_regs.h>
35#include <msp_int.h> 35#include <msp_int.h>
36#include <msp_prom.h> 36#include <msp_prom.h>
37#include <msp_usb.h>
38
37 39
38#if defined(CONFIG_USB_EHCI_HCD) 40#if defined(CONFIG_USB_EHCI_HCD)
39static struct resource msp_usbhost_resources [] = { 41static struct resource msp_usbhost0_resources[] = {
40 [0] = { 42 [0] = { /* EHCI-HS operational and capabilities registers */
41 .start = MSP_USB_BASE_START, 43 .start = MSP_USB0_HS_START,
42 .end = MSP_USB_BASE_END, 44 .end = MSP_USB0_HS_END,
43 .flags = IORESOURCE_MEM, 45 .flags = IORESOURCE_MEM,
44 }, 46 },
45 [1] = { 47 [1] = {
46 .start = MSP_INT_USB, 48 .start = MSP_INT_USB,
47 .end = MSP_INT_USB, 49 .end = MSP_INT_USB,
48 .flags = IORESOURCE_IRQ, 50 .flags = IORESOURCE_IRQ,
51 },
52 [2] = { /* MSBus-to-AMBA bridge register space */
53 .start = MSP_USB0_MAB_START,
54 .end = MSP_USB0_MAB_END,
55 .flags = IORESOURCE_MEM,
56 },
57 [3] = { /* Identification and general hardware parameters */
58 .start = MSP_USB0_ID_START,
59 .end = MSP_USB0_ID_END,
60 .flags = IORESOURCE_MEM,
49 }, 61 },
50}; 62};
51 63
52static u64 msp_usbhost_dma_mask = DMA_BIT_MASK(32); 64static u64 msp_usbhost0_dma_mask = 0xffffffffUL;
53 65
54static struct platform_device msp_usbhost_device = { 66static struct mspusb_device msp_usbhost0_device = {
55 .name = "pmcmsp-ehci",
56 .id = 0,
57 .dev = { 67 .dev = {
58 .dma_mask = &msp_usbhost_dma_mask, 68 .name = "pmcmsp-ehci",
59 .coherent_dma_mask = DMA_BIT_MASK(32), 69 .id = 0,
70 .dev = {
71 .dma_mask = &msp_usbhost0_dma_mask,
72 .coherent_dma_mask = 0xffffffffUL,
73 },
74 .num_resources = ARRAY_SIZE(msp_usbhost0_resources),
75 .resource = msp_usbhost0_resources,
60 }, 76 },
61 .num_resources = ARRAY_SIZE(msp_usbhost_resources),
62 .resource = msp_usbhost_resources,
63}; 77};
64#endif /* CONFIG_USB_EHCI_HCD */
65 78
66#if defined(CONFIG_USB_GADGET) 79/* MSP7140/MSP82XX has two USB2 hosts. */
67static struct resource msp_usbdev_resources [] = { 80#ifdef CONFIG_MSP_HAS_DUAL_USB
68 [0] = { 81static u64 msp_usbhost1_dma_mask = 0xffffffffUL;
69 .start = MSP_USB_BASE, 82
70 .end = MSP_USB_BASE_END, 83static struct resource msp_usbhost1_resources[] = {
84 [0] = { /* EHCI-HS operational and capabilities registers */
85 .start = MSP_USB1_HS_START,
86 .end = MSP_USB1_HS_END,
71 .flags = IORESOURCE_MEM, 87 .flags = IORESOURCE_MEM,
72 }, 88 },
73 [1] = { 89 [1] = {
@@ -75,76 +91,173 @@ static struct resource msp_usbdev_resources [] = {
75 .end = MSP_INT_USB, 91 .end = MSP_INT_USB,
76 .flags = IORESOURCE_IRQ, 92 .flags = IORESOURCE_IRQ,
77 }, 93 },
94 [2] = { /* MSBus-to-AMBA bridge register space */
95 .start = MSP_USB1_MAB_START,
96 .end = MSP_USB1_MAB_END,
97 .flags = IORESOURCE_MEM,
98 },
99 [3] = { /* Identification and general hardware parameters */
100 .start = MSP_USB1_ID_START,
101 .end = MSP_USB1_ID_END,
102 .flags = IORESOURCE_MEM,
103 },
104};
105
106static struct mspusb_device msp_usbhost1_device = {
107 .dev = {
108 .name = "pmcmsp-ehci",
109 .id = 1,
110 .dev = {
111 .dma_mask = &msp_usbhost1_dma_mask,
112 .coherent_dma_mask = 0xffffffffUL,
113 },
114 .num_resources = ARRAY_SIZE(msp_usbhost1_resources),
115 .resource = msp_usbhost1_resources,
116 },
78}; 117};
118#endif /* CONFIG_MSP_HAS_DUAL_USB */
119#endif /* CONFIG_USB_EHCI_HCD */
79 120
80static u64 msp_usbdev_dma_mask = DMA_BIT_MASK(32); 121#if defined(CONFIG_USB_GADGET)
122static struct resource msp_usbdev0_resources[] = {
123 [0] = { /* EHCI-HS operational and capabilities registers */
124 .start = MSP_USB0_HS_START,
125 .end = MSP_USB0_HS_END,
126 .flags = IORESOURCE_MEM,
127 },
128 [1] = {
129 .start = MSP_INT_USB,
130 .end = MSP_INT_USB,
131 .flags = IORESOURCE_IRQ,
132 },
133 [2] = { /* MSBus-to-AMBA bridge register space */
134 .start = MSP_USB0_MAB_START,
135 .end = MSP_USB0_MAB_END,
136 .flags = IORESOURCE_MEM,
137 },
138 [3] = { /* Identification and general hardware parameters */
139 .start = MSP_USB0_ID_START,
140 .end = MSP_USB0_ID_END,
141 .flags = IORESOURCE_MEM,
142 },
143};
81 144
82static struct platform_device msp_usbdev_device = { 145static u64 msp_usbdev_dma_mask = 0xffffffffUL;
83 .name = "msp71xx_udc", 146
84 .id = 0, 147/* This may need to be converted to a mspusb_device, too. */
148static struct mspusb_device msp_usbdev0_device = {
85 .dev = { 149 .dev = {
86 .dma_mask = &msp_usbdev_dma_mask, 150 .name = "msp71xx_udc",
87 .coherent_dma_mask = DMA_BIT_MASK(32), 151 .id = 0,
152 .dev = {
153 .dma_mask = &msp_usbdev_dma_mask,
154 .coherent_dma_mask = 0xffffffffUL,
155 },
156 .num_resources = ARRAY_SIZE(msp_usbdev0_resources),
157 .resource = msp_usbdev0_resources,
88 }, 158 },
89 .num_resources = ARRAY_SIZE(msp_usbdev_resources),
90 .resource = msp_usbdev_resources,
91}; 159};
92#endif /* CONFIG_USB_GADGET */
93 160
94#if defined(CONFIG_USB_EHCI_HCD) || defined(CONFIG_USB_GADGET) 161#ifdef CONFIG_MSP_HAS_DUAL_USB
95static struct platform_device *msp_devs[1]; 162static struct resource msp_usbdev1_resources[] = {
96#endif 163 [0] = { /* EHCI-HS operational and capabilities registers */
164 .start = MSP_USB1_HS_START,
165 .end = MSP_USB1_HS_END,
166 .flags = IORESOURCE_MEM,
167 },
168 [1] = {
169 .start = MSP_INT_USB,
170 .end = MSP_INT_USB,
171 .flags = IORESOURCE_IRQ,
172 },
173 [2] = { /* MSBus-to-AMBA bridge register space */
174 .start = MSP_USB1_MAB_START,
175 .end = MSP_USB1_MAB_END,
176 .flags = IORESOURCE_MEM,
177 },
178 [3] = { /* Identification and general hardware parameters */
179 .start = MSP_USB1_ID_START,
180 .end = MSP_USB1_ID_END,
181 .flags = IORESOURCE_MEM,
182 },
183};
97 184
185/* This may need to be converted to a mspusb_device, too. */
186static struct mspusb_device msp_usbdev1_device = {
187 .dev = {
188 .name = "msp71xx_udc",
189 .id = 0,
190 .dev = {
191 .dma_mask = &msp_usbdev_dma_mask,
192 .coherent_dma_mask = 0xffffffffUL,
193 },
194 .num_resources = ARRAY_SIZE(msp_usbdev1_resources),
195 .resource = msp_usbdev1_resources,
196 },
197};
198
199#endif /* CONFIG_MSP_HAS_DUAL_USB */
200#endif /* CONFIG_USB_GADGET */
98 201
99static int __init msp_usb_setup(void) 202static int __init msp_usb_setup(void)
100{ 203{
101#if defined(CONFIG_USB_EHCI_HCD) || defined(CONFIG_USB_GADGET) 204 char *strp;
102 char *strp; 205 char envstr[32];
103 char envstr[32]; 206 struct platform_device *msp_devs[NUM_USB_DEVS];
104 unsigned int val = 0; 207 unsigned int val;
105 int result = 0;
106 208
209 /* construct environment name usbmode */
210 /* set usbmode <host/device> as pmon environment var */
107 /* 211 /*
108 * construct environment name usbmode 212 * Could this perhaps be integrated into the "features" env var?
109 * set usbmode <host/device> as pmon environment var 213 * Use the features key "U", and follow with "H" for host-mode,
214 * "D" for device-mode. If it works for Ethernet, why not USB...
215 * -- hammtrev, 2007/03/22
110 */ 216 */
111 snprintf((char *)&envstr[0], sizeof(envstr), "usbmode"); 217 snprintf((char *)&envstr[0], sizeof(envstr), "usbmode");
112 218
113#if defined(CONFIG_USB_EHCI_HCD) 219 /* set default host mode */
114 /* default to host mode */
115 val = 1; 220 val = 1;
116#endif
117 221
118 /* get environment string */ 222 /* get environment string */
119 strp = prom_getenv((char *)&envstr[0]); 223 strp = prom_getenv((char *)&envstr[0]);
120 if (strp) { 224 if (strp) {
225 /* compare string */
121 if (!strcmp(strp, "device")) 226 if (!strcmp(strp, "device"))
122 val = 0; 227 val = 0;
123 } 228 }
124 229
125 if (val) { 230 if (val) {
126#if defined(CONFIG_USB_EHCI_HCD) 231#if defined(CONFIG_USB_EHCI_HCD)
127 /* get host mode device */ 232 msp_devs[0] = &msp_usbhost0_device.dev;
128 msp_devs[0] = &msp_usbhost_device; 233 ppfinit("platform add USB HOST done %s.\n", msp_devs[0]->name);
129 ppfinit("platform add USB HOST done %s.\n", 234#ifdef CONFIG_MSP_HAS_DUAL_USB
130 msp_devs[0]->name); 235 msp_devs[1] = &msp_usbhost1_device.dev;
131 236 ppfinit("platform add USB HOST done %s.\n", msp_devs[1]->name);
132 result = platform_add_devices(msp_devs, ARRAY_SIZE(msp_devs)); 237#endif
133#endif /* CONFIG_USB_EHCI_HCD */ 238#else
134 } 239 ppfinit("%s: ehci_hcd not supported\n", __FILE__);
240#endif /* CONFIG_USB_EHCI_HCD */
241 } else {
135#if defined(CONFIG_USB_GADGET) 242#if defined(CONFIG_USB_GADGET)
136 else {
137 /* get device mode structure */ 243 /* get device mode structure */
138 msp_devs[0] = &msp_usbdev_device; 244 msp_devs[0] = &msp_usbdev0_device.dev;
139 ppfinit("platform add USB DEVICE done %s.\n", 245 ppfinit("platform add USB DEVICE done %s.\n"
140 msp_devs[0]->name); 246 , msp_devs[0]->name);
141 247#ifdef CONFIG_MSP_HAS_DUAL_USB
142 result = platform_add_devices(msp_devs, ARRAY_SIZE(msp_devs)); 248 msp_devs[1] = &msp_usbdev1_device.dev;
249 ppfinit("platform add USB DEVICE done %s.\n"
250 , msp_devs[1]->name);
251#endif
252#else
253 ppfinit("%s: usb_gadget not supported\n", __FILE__);
254#endif /* CONFIG_USB_GADGET */
143 } 255 }
144#endif /* CONFIG_USB_GADGET */ 256 /* add device */
145#endif /* CONFIG_USB_EHCI_HCD || CONFIG_USB_GADGET */ 257 platform_add_devices(msp_devs, ARRAY_SIZE(msp_devs));
146 258
147 return result; 259 return 0;
148} 260}
149 261
150subsys_initcall(msp_usb_setup); 262subsys_initcall(msp_usb_setup);
263#endif /* CONFIG_USB_EHCI_HCD || CONFIG_USB_GADGET */
diff --git a/arch/mips/pmc-sierra/yosemite/Makefile b/arch/mips/pmc-sierra/yosemite/Makefile
index b16f95c3df6..02f5fb94ea2 100644
--- a/arch/mips/pmc-sierra/yosemite/Makefile
+++ b/arch/mips/pmc-sierra/yosemite/Makefile
@@ -6,4 +6,4 @@ obj-y += irq.o prom.o py-console.o setup.o
6 6
7obj-$(CONFIG_SMP) += smp.o 7obj-$(CONFIG_SMP) += smp.o
8 8
9EXTRA_CFLAGS += -Werror 9ccflags-y := -Werror
diff --git a/arch/mips/pnx833x/common/interrupts.c b/arch/mips/pnx833x/common/interrupts.c
index 941916f8aaf..adc171c8846 100644
--- a/arch/mips/pnx833x/common/interrupts.c
+++ b/arch/mips/pnx833x/common/interrupts.c
@@ -152,10 +152,6 @@ static inline void pnx833x_hard_disable_pic_irq(unsigned int irq)
152 PNX833X_PIC_INT_REG(irq) = 0; 152 PNX833X_PIC_INT_REG(irq) = 0;
153} 153}
154 154
155static int irqflags[PNX833X_PIC_NUM_IRQ]; /* initialized by zeroes */
156#define IRQFLAG_STARTED 1
157#define IRQFLAG_DISABLED 2
158
159static DEFINE_RAW_SPINLOCK(pnx833x_irq_lock); 155static DEFINE_RAW_SPINLOCK(pnx833x_irq_lock);
160 156
161static unsigned int pnx833x_startup_pic_irq(unsigned int irq) 157static unsigned int pnx833x_startup_pic_irq(unsigned int irq)
@@ -164,108 +160,54 @@ static unsigned int pnx833x_startup_pic_irq(unsigned int irq)
164 unsigned int pic_irq = irq - PNX833X_PIC_IRQ_BASE; 160 unsigned int pic_irq = irq - PNX833X_PIC_IRQ_BASE;
165 161
166 raw_spin_lock_irqsave(&pnx833x_irq_lock, flags); 162 raw_spin_lock_irqsave(&pnx833x_irq_lock, flags);
167
168 irqflags[pic_irq] = IRQFLAG_STARTED; /* started, not disabled */
169 pnx833x_hard_enable_pic_irq(pic_irq); 163 pnx833x_hard_enable_pic_irq(pic_irq);
170
171 raw_spin_unlock_irqrestore(&pnx833x_irq_lock, flags); 164 raw_spin_unlock_irqrestore(&pnx833x_irq_lock, flags);
172 return 0; 165 return 0;
173} 166}
174 167
175static void pnx833x_shutdown_pic_irq(unsigned int irq) 168static void pnx833x_enable_pic_irq(struct irq_data *d)
176{
177 unsigned long flags;
178 unsigned int pic_irq = irq - PNX833X_PIC_IRQ_BASE;
179
180 raw_spin_lock_irqsave(&pnx833x_irq_lock, flags);
181
182 irqflags[pic_irq] = 0; /* not started */
183 pnx833x_hard_disable_pic_irq(pic_irq);
184
185 raw_spin_unlock_irqrestore(&pnx833x_irq_lock, flags);
186}
187
188static void pnx833x_enable_pic_irq(unsigned int irq)
189{ 169{
190 unsigned long flags; 170 unsigned long flags;
191 unsigned int pic_irq = irq - PNX833X_PIC_IRQ_BASE; 171 unsigned int pic_irq = d->irq - PNX833X_PIC_IRQ_BASE;
192 172
193 raw_spin_lock_irqsave(&pnx833x_irq_lock, flags); 173 raw_spin_lock_irqsave(&pnx833x_irq_lock, flags);
194 174 pnx833x_hard_enable_pic_irq(pic_irq);
195 irqflags[pic_irq] &= ~IRQFLAG_DISABLED;
196 if (irqflags[pic_irq] == IRQFLAG_STARTED)
197 pnx833x_hard_enable_pic_irq(pic_irq);
198
199 raw_spin_unlock_irqrestore(&pnx833x_irq_lock, flags); 175 raw_spin_unlock_irqrestore(&pnx833x_irq_lock, flags);
200} 176}
201 177
202static void pnx833x_disable_pic_irq(unsigned int irq) 178static void pnx833x_disable_pic_irq(struct irq_data *d)
203{ 179{
204 unsigned long flags; 180 unsigned long flags;
205 unsigned int pic_irq = irq - PNX833X_PIC_IRQ_BASE; 181 unsigned int pic_irq = d->irq - PNX833X_PIC_IRQ_BASE;
206 182
207 raw_spin_lock_irqsave(&pnx833x_irq_lock, flags); 183 raw_spin_lock_irqsave(&pnx833x_irq_lock, flags);
208
209 irqflags[pic_irq] |= IRQFLAG_DISABLED;
210 pnx833x_hard_disable_pic_irq(pic_irq); 184 pnx833x_hard_disable_pic_irq(pic_irq);
211
212 raw_spin_unlock_irqrestore(&pnx833x_irq_lock, flags); 185 raw_spin_unlock_irqrestore(&pnx833x_irq_lock, flags);
213} 186}
214 187
215static void pnx833x_ack_pic_irq(unsigned int irq)
216{
217}
218
219static void pnx833x_end_pic_irq(unsigned int irq)
220{
221}
222
223static DEFINE_RAW_SPINLOCK(pnx833x_gpio_pnx833x_irq_lock); 188static DEFINE_RAW_SPINLOCK(pnx833x_gpio_pnx833x_irq_lock);
224 189
225static unsigned int pnx833x_startup_gpio_irq(unsigned int irq) 190static void pnx833x_enable_gpio_irq(struct irq_data *d)
226{
227 int pin = irq - PNX833X_GPIO_IRQ_BASE;
228 unsigned long flags;
229 raw_spin_lock_irqsave(&pnx833x_gpio_pnx833x_irq_lock, flags);
230 pnx833x_gpio_enable_irq(pin);
231 raw_spin_unlock_irqrestore(&pnx833x_gpio_pnx833x_irq_lock, flags);
232 return 0;
233}
234
235static void pnx833x_enable_gpio_irq(unsigned int irq)
236{ 191{
237 int pin = irq - PNX833X_GPIO_IRQ_BASE; 192 int pin = d->irq - PNX833X_GPIO_IRQ_BASE;
238 unsigned long flags; 193 unsigned long flags;
239 raw_spin_lock_irqsave(&pnx833x_gpio_pnx833x_irq_lock, flags); 194 raw_spin_lock_irqsave(&pnx833x_gpio_pnx833x_irq_lock, flags);
240 pnx833x_gpio_enable_irq(pin); 195 pnx833x_gpio_enable_irq(pin);
241 raw_spin_unlock_irqrestore(&pnx833x_gpio_pnx833x_irq_lock, flags); 196 raw_spin_unlock_irqrestore(&pnx833x_gpio_pnx833x_irq_lock, flags);
242} 197}
243 198
244static void pnx833x_disable_gpio_irq(unsigned int irq) 199static void pnx833x_disable_gpio_irq(struct irq_data *d)
245{ 200{
246 int pin = irq - PNX833X_GPIO_IRQ_BASE; 201 int pin = d->irq - PNX833X_GPIO_IRQ_BASE;
247 unsigned long flags; 202 unsigned long flags;
248 raw_spin_lock_irqsave(&pnx833x_gpio_pnx833x_irq_lock, flags); 203 raw_spin_lock_irqsave(&pnx833x_gpio_pnx833x_irq_lock, flags);
249 pnx833x_gpio_disable_irq(pin); 204 pnx833x_gpio_disable_irq(pin);
250 raw_spin_unlock_irqrestore(&pnx833x_gpio_pnx833x_irq_lock, flags); 205 raw_spin_unlock_irqrestore(&pnx833x_gpio_pnx833x_irq_lock, flags);
251} 206}
252 207
253static void pnx833x_ack_gpio_irq(unsigned int irq) 208static int pnx833x_set_type_gpio_irq(struct irq_data *d, unsigned int flow_type)
254{
255}
256
257static void pnx833x_end_gpio_irq(unsigned int irq)
258{
259 int pin = irq - PNX833X_GPIO_IRQ_BASE;
260 unsigned long flags;
261 raw_spin_lock_irqsave(&pnx833x_gpio_pnx833x_irq_lock, flags);
262 pnx833x_gpio_clear_irq(pin);
263 raw_spin_unlock_irqrestore(&pnx833x_gpio_pnx833x_irq_lock, flags);
264}
265
266static int pnx833x_set_type_gpio_irq(unsigned int irq, unsigned int flow_type)
267{ 209{
268 int pin = irq - PNX833X_GPIO_IRQ_BASE; 210 int pin = d->irq - PNX833X_GPIO_IRQ_BASE;
269 int gpio_mode; 211 int gpio_mode;
270 212
271 switch (flow_type) { 213 switch (flow_type) {
@@ -296,23 +238,15 @@ static int pnx833x_set_type_gpio_irq(unsigned int irq, unsigned int flow_type)
296 238
297static struct irq_chip pnx833x_pic_irq_type = { 239static struct irq_chip pnx833x_pic_irq_type = {
298 .name = "PNX-PIC", 240 .name = "PNX-PIC",
299 .startup = pnx833x_startup_pic_irq, 241 .irq_enable = pnx833x_enable_pic_irq,
300 .shutdown = pnx833x_shutdown_pic_irq, 242 .irq_disable = pnx833x_disable_pic_irq,
301 .enable = pnx833x_enable_pic_irq,
302 .disable = pnx833x_disable_pic_irq,
303 .ack = pnx833x_ack_pic_irq,
304 .end = pnx833x_end_pic_irq
305}; 243};
306 244
307static struct irq_chip pnx833x_gpio_irq_type = { 245static struct irq_chip pnx833x_gpio_irq_type = {
308 .name = "PNX-GPIO", 246 .name = "PNX-GPIO",
309 .startup = pnx833x_startup_gpio_irq, 247 .irq_enable = pnx833x_enable_gpio_irq,
310 .shutdown = pnx833x_disable_gpio_irq, 248 .irq_disable = pnx833x_disable_gpio_irq,
311 .enable = pnx833x_enable_gpio_irq, 249 .irq_set_type = pnx833x_set_type_gpio_irq,
312 .disable = pnx833x_disable_gpio_irq,
313 .ack = pnx833x_ack_gpio_irq,
314 .end = pnx833x_end_gpio_irq,
315 .set_type = pnx833x_set_type_gpio_irq
316}; 250};
317 251
318void __init arch_init_irq(void) 252void __init arch_init_irq(void)
@@ -325,11 +259,13 @@ void __init arch_init_irq(void)
325 /* Set IRQ information in irq_desc */ 259 /* Set IRQ information in irq_desc */
326 for (irq = PNX833X_PIC_IRQ_BASE; irq < (PNX833X_PIC_IRQ_BASE + PNX833X_PIC_NUM_IRQ); irq++) { 260 for (irq = PNX833X_PIC_IRQ_BASE; irq < (PNX833X_PIC_IRQ_BASE + PNX833X_PIC_NUM_IRQ); irq++) {
327 pnx833x_hard_disable_pic_irq(irq); 261 pnx833x_hard_disable_pic_irq(irq);
328 set_irq_chip_and_handler(irq, &pnx833x_pic_irq_type, handle_simple_irq); 262 irq_set_chip_and_handler(irq, &pnx833x_pic_irq_type,
263 handle_simple_irq);
329 } 264 }
330 265
331 for (irq = PNX833X_GPIO_IRQ_BASE; irq < (PNX833X_GPIO_IRQ_BASE + PNX833X_GPIO_NUM_IRQ); irq++) 266 for (irq = PNX833X_GPIO_IRQ_BASE; irq < (PNX833X_GPIO_IRQ_BASE + PNX833X_GPIO_NUM_IRQ); irq++)
332 set_irq_chip_and_handler(irq, &pnx833x_gpio_irq_type, handle_simple_irq); 267 irq_set_chip_and_handler(irq, &pnx833x_gpio_irq_type,
268 handle_simple_irq);
333 269
334 /* Set PIC priority limiter register to 0 */ 270 /* Set PIC priority limiter register to 0 */
335 PNX833X_PIC_INT_PRIORITY = 0; 271 PNX833X_PIC_INT_PRIORITY = 0;
diff --git a/arch/mips/pnx833x/common/platform.c b/arch/mips/pnx833x/common/platform.c
index ce45df17fd0..87167dcc79f 100644
--- a/arch/mips/pnx833x/common/platform.c
+++ b/arch/mips/pnx833x/common/platform.c
@@ -165,7 +165,7 @@ static struct i2c_pnx0105_dev pnx833x_i2c_dev[] = {
165 { 165 {
166 .base = PNX833X_I2C0_PORTS_START, 166 .base = PNX833X_I2C0_PORTS_START,
167 .irq = -1, /* should be PNX833X_PIC_I2C0_INT but polling is faster */ 167 .irq = -1, /* should be PNX833X_PIC_I2C0_INT but polling is faster */
168 .clock = 6, /* 0 == 400 kHz, 4 == 100 kHz(Maximum HDMI), 6 = 50kHz(Prefered HDCP) */ 168 .clock = 6, /* 0 == 400 kHz, 4 == 100 kHz(Maximum HDMI), 6 = 50kHz(Preferred HDCP) */
169 .bus_addr = 0, /* no slave support */ 169 .bus_addr = 0, /* no slave support */
170 }, 170 },
171 { 171 {
diff --git a/arch/mips/pnx8550/common/int.c b/arch/mips/pnx8550/common/int.c
index cfed5051dc6..6b93c81779c 100644
--- a/arch/mips/pnx8550/common/int.c
+++ b/arch/mips/pnx8550/common/int.c
@@ -114,8 +114,10 @@ static inline void unmask_gic_int(unsigned int irq_nr)
114 PNX8550_GIC_REQ(irq_nr) = (1<<26 | 1<<16) | (1<<28) | gic_prio[irq_nr]; 114 PNX8550_GIC_REQ(irq_nr) = (1<<26 | 1<<16) | (1<<28) | gic_prio[irq_nr];
115} 115}
116 116
117static inline void mask_irq(unsigned int irq_nr) 117static inline void mask_irq(struct irq_data *d)
118{ 118{
119 unsigned int irq_nr = d->irq;
120
119 if ((PNX8550_INT_CP0_MIN <= irq_nr) && (irq_nr <= PNX8550_INT_CP0_MAX)) { 121 if ((PNX8550_INT_CP0_MIN <= irq_nr) && (irq_nr <= PNX8550_INT_CP0_MAX)) {
120 modify_cp0_intmask(1 << irq_nr, 0); 122 modify_cp0_intmask(1 << irq_nr, 0);
121 } else if ((PNX8550_INT_GIC_MIN <= irq_nr) && 123 } else if ((PNX8550_INT_GIC_MIN <= irq_nr) &&
@@ -129,8 +131,10 @@ static inline void mask_irq(unsigned int irq_nr)
129 } 131 }
130} 132}
131 133
132static inline void unmask_irq(unsigned int irq_nr) 134static inline void unmask_irq(struct irq_data *d)
133{ 135{
136 unsigned int irq_nr = d->irq;
137
134 if ((PNX8550_INT_CP0_MIN <= irq_nr) && (irq_nr <= PNX8550_INT_CP0_MAX)) { 138 if ((PNX8550_INT_CP0_MIN <= irq_nr) && (irq_nr <= PNX8550_INT_CP0_MAX)) {
135 modify_cp0_intmask(0, 1 << irq_nr); 139 modify_cp0_intmask(0, 1 << irq_nr);
136 } else if ((PNX8550_INT_GIC_MIN <= irq_nr) && 140 } else if ((PNX8550_INT_GIC_MIN <= irq_nr) &&
@@ -157,10 +161,8 @@ int pnx8550_set_gic_priority(int irq, int priority)
157 161
158static struct irq_chip level_irq_type = { 162static struct irq_chip level_irq_type = {
159 .name = "PNX Level IRQ", 163 .name = "PNX Level IRQ",
160 .ack = mask_irq, 164 .irq_mask = mask_irq,
161 .mask = mask_irq, 165 .irq_unmask = unmask_irq,
162 .mask_ack = mask_irq,
163 .unmask = unmask_irq,
164}; 166};
165 167
166static struct irqaction gic_action = { 168static struct irqaction gic_action = {
@@ -180,10 +182,8 @@ void __init arch_init_irq(void)
180 int i; 182 int i;
181 int configPR; 183 int configPR;
182 184
183 for (i = 0; i < PNX8550_INT_CP0_TOTINT; i++) { 185 for (i = 0; i < PNX8550_INT_CP0_TOTINT; i++)
184 set_irq_chip_and_handler(i, &level_irq_type, handle_level_irq); 186 irq_set_chip_and_handler(i, &level_irq_type, handle_level_irq);
185 mask_irq(i); /* mask the irq just in case */
186 }
187 187
188 /* init of GIC/IPC interrupts */ 188 /* init of GIC/IPC interrupts */
189 /* should be done before cp0 since cp0 init enables the GIC int */ 189 /* should be done before cp0 since cp0 init enables the GIC int */
@@ -206,7 +206,7 @@ void __init arch_init_irq(void)
206 /* mask/priority is still 0 so we will not get any 206 /* mask/priority is still 0 so we will not get any
207 * interrupts until it is unmasked */ 207 * interrupts until it is unmasked */
208 208
209 set_irq_chip_and_handler(i, &level_irq_type, handle_level_irq); 209 irq_set_chip_and_handler(i, &level_irq_type, handle_level_irq);
210 } 210 }
211 211
212 /* Priority level 0 */ 212 /* Priority level 0 */
@@ -215,20 +215,20 @@ void __init arch_init_irq(void)
215 /* Set int vector table address */ 215 /* Set int vector table address */
216 PNX8550_GIC_VECTOR_0 = PNX8550_GIC_VECTOR_1 = 0; 216 PNX8550_GIC_VECTOR_0 = PNX8550_GIC_VECTOR_1 = 0;
217 217
218 set_irq_chip_and_handler(MIPS_CPU_GIC_IRQ, &level_irq_type, 218 irq_set_chip_and_handler(MIPS_CPU_GIC_IRQ, &level_irq_type,
219 handle_level_irq); 219 handle_level_irq);
220 setup_irq(MIPS_CPU_GIC_IRQ, &gic_action); 220 setup_irq(MIPS_CPU_GIC_IRQ, &gic_action);
221 221
222 /* init of Timer interrupts */ 222 /* init of Timer interrupts */
223 for (i = PNX8550_INT_TIMER_MIN; i <= PNX8550_INT_TIMER_MAX; i++) 223 for (i = PNX8550_INT_TIMER_MIN; i <= PNX8550_INT_TIMER_MAX; i++)
224 set_irq_chip_and_handler(i, &level_irq_type, handle_level_irq); 224 irq_set_chip_and_handler(i, &level_irq_type, handle_level_irq);
225 225
226 /* Stop Timer 1-3 */ 226 /* Stop Timer 1-3 */
227 configPR = read_c0_config7(); 227 configPR = read_c0_config7();
228 configPR |= 0x00000038; 228 configPR |= 0x00000038;
229 write_c0_config7(configPR); 229 write_c0_config7(configPR);
230 230
231 set_irq_chip_and_handler(MIPS_CPU_TIMER_IRQ, &level_irq_type, 231 irq_set_chip_and_handler(MIPS_CPU_TIMER_IRQ, &level_irq_type,
232 handle_level_irq); 232 handle_level_irq);
233 setup_irq(MIPS_CPU_TIMER_IRQ, &timer_action); 233 setup_irq(MIPS_CPU_TIMER_IRQ, &timer_action);
234} 234}
diff --git a/arch/mips/powertv/Makefile b/arch/mips/powertv/Makefile
index baf6e9092a9..348d2e850ef 100644
--- a/arch/mips/powertv/Makefile
+++ b/arch/mips/powertv/Makefile
@@ -28,4 +28,4 @@ obj-y += init.o ioremap.o memory.o powertv_setup.o reset.o time.o \
28 28
29obj-$(CONFIG_USB) += powertv-usb.o 29obj-$(CONFIG_USB) += powertv-usb.o
30 30
31EXTRA_CFLAGS += -Wall 31ccflags-y := -Wall
diff --git a/arch/mips/powertv/asic/Makefile b/arch/mips/powertv/asic/Makefile
index f0e95dc0ac9..d810a33182a 100644
--- a/arch/mips/powertv/asic/Makefile
+++ b/arch/mips/powertv/asic/Makefile
@@ -20,4 +20,4 @@ obj-y += asic-calliope.o asic-cronus.o asic-gaia.o asic-zeus.o \
20 asic_devices.o asic_int.o irq_asic.o prealloc-calliope.o \ 20 asic_devices.o asic_int.o irq_asic.o prealloc-calliope.o \
21 prealloc-cronus.o prealloc-cronuslite.o prealloc-gaia.o prealloc-zeus.o 21 prealloc-cronus.o prealloc-cronuslite.o prealloc-gaia.o prealloc-zeus.o
22 22
23EXTRA_CFLAGS += -Wall -Werror 23ccflags-y := -Wall -Werror
diff --git a/arch/mips/powertv/asic/irq_asic.c b/arch/mips/powertv/asic/irq_asic.c
index e5538243415..7fb97fb0931 100644
--- a/arch/mips/powertv/asic/irq_asic.c
+++ b/arch/mips/powertv/asic/irq_asic.c
@@ -21,9 +21,10 @@
21 21
22#include <asm/mach-powertv/asic_regs.h> 22#include <asm/mach-powertv/asic_regs.h>
23 23
24static inline void unmask_asic_irq(unsigned int irq) 24static inline void unmask_asic_irq(struct irq_data *d)
25{ 25{
26 unsigned long enable_bit; 26 unsigned long enable_bit;
27 unsigned int irq = d->irq;
27 28
28 enable_bit = (1 << (irq & 0x1f)); 29 enable_bit = (1 << (irq & 0x1f));
29 30
@@ -45,9 +46,10 @@ static inline void unmask_asic_irq(unsigned int irq)
45 } 46 }
46} 47}
47 48
48static inline void mask_asic_irq(unsigned int irq) 49static inline void mask_asic_irq(struct irq_data *d)
49{ 50{
50 unsigned long disable_mask; 51 unsigned long disable_mask;
52 unsigned int irq = d->irq;
51 53
52 disable_mask = ~(1 << (irq & 0x1f)); 54 disable_mask = ~(1 << (irq & 0x1f));
53 55
@@ -71,11 +73,8 @@ static inline void mask_asic_irq(unsigned int irq)
71 73
72static struct irq_chip asic_irq_chip = { 74static struct irq_chip asic_irq_chip = {
73 .name = "ASIC Level", 75 .name = "ASIC Level",
74 .ack = mask_asic_irq, 76 .irq_mask = mask_asic_irq,
75 .mask = mask_asic_irq, 77 .irq_unmask = unmask_asic_irq,
76 .mask_ack = mask_asic_irq,
77 .unmask = unmask_asic_irq,
78 .eoi = unmask_asic_irq,
79}; 78};
80 79
81void __init asic_irq_init(void) 80void __init asic_irq_init(void)
@@ -113,5 +112,5 @@ void __init asic_irq_init(void)
113 * Initialize interrupt handlers. 112 * Initialize interrupt handlers.
114 */ 113 */
115 for (i = 0; i < NR_IRQS; i++) 114 for (i = 0; i < NR_IRQS; i++)
116 set_irq_chip_and_handler(i, &asic_irq_chip, handle_level_irq); 115 irq_set_chip_and_handler(i, &asic_irq_chip, handle_level_irq);
117} 116}
diff --git a/arch/mips/powertv/pci/Makefile b/arch/mips/powertv/pci/Makefile
index f5c62462fc9..5783201cd2c 100644
--- a/arch/mips/powertv/pci/Makefile
+++ b/arch/mips/powertv/pci/Makefile
@@ -18,4 +18,4 @@
18 18
19obj-$(CONFIG_PCI) += fixup-powertv.o 19obj-$(CONFIG_PCI) += fixup-powertv.o
20 20
21EXTRA_CFLAGS += -Wall -Werror 21ccflags-y := -Wall -Werror
diff --git a/arch/mips/rb532/irq.c b/arch/mips/rb532/irq.c
index ea6cec3c1e0..7c6db74e3fa 100644
--- a/arch/mips/rb532/irq.c
+++ b/arch/mips/rb532/irq.c
@@ -111,10 +111,10 @@ static inline void ack_local_irq(unsigned int ip)
111 clear_c0_cause(ipnum); 111 clear_c0_cause(ipnum);
112} 112}
113 113
114static void rb532_enable_irq(unsigned int irq_nr) 114static void rb532_enable_irq(struct irq_data *d)
115{ 115{
116 unsigned int group, intr_bit, irq_nr = d->irq;
116 int ip = irq_nr - GROUP0_IRQ_BASE; 117 int ip = irq_nr - GROUP0_IRQ_BASE;
117 unsigned int group, intr_bit;
118 volatile unsigned int *addr; 118 volatile unsigned int *addr;
119 119
120 if (ip < 0) 120 if (ip < 0)
@@ -132,10 +132,10 @@ static void rb532_enable_irq(unsigned int irq_nr)
132 } 132 }
133} 133}
134 134
135static void rb532_disable_irq(unsigned int irq_nr) 135static void rb532_disable_irq(struct irq_data *d)
136{ 136{
137 unsigned int group, intr_bit, mask, irq_nr = d->irq;
137 int ip = irq_nr - GROUP0_IRQ_BASE; 138 int ip = irq_nr - GROUP0_IRQ_BASE;
138 unsigned int group, intr_bit, mask;
139 volatile unsigned int *addr; 139 volatile unsigned int *addr;
140 140
141 if (ip < 0) { 141 if (ip < 0) {
@@ -163,18 +163,18 @@ static void rb532_disable_irq(unsigned int irq_nr)
163 } 163 }
164} 164}
165 165
166static void rb532_mask_and_ack_irq(unsigned int irq_nr) 166static void rb532_mask_and_ack_irq(struct irq_data *d)
167{ 167{
168 rb532_disable_irq(irq_nr); 168 rb532_disable_irq(d);
169 ack_local_irq(group_to_ip(irq_to_group(irq_nr))); 169 ack_local_irq(group_to_ip(irq_to_group(d->irq)));
170} 170}
171 171
172static int rb532_set_type(unsigned int irq_nr, unsigned type) 172static int rb532_set_type(struct irq_data *d, unsigned type)
173{ 173{
174 int gpio = irq_nr - GPIO_MAPPED_IRQ_BASE; 174 int gpio = d->irq - GPIO_MAPPED_IRQ_BASE;
175 int group = irq_to_group(irq_nr); 175 int group = irq_to_group(d->irq);
176 176
177 if (group != GPIO_MAPPED_IRQ_GROUP || irq_nr > (GROUP4_IRQ_BASE + 13)) 177 if (group != GPIO_MAPPED_IRQ_GROUP || d->irq > (GROUP4_IRQ_BASE + 13))
178 return (type == IRQ_TYPE_LEVEL_HIGH) ? 0 : -EINVAL; 178 return (type == IRQ_TYPE_LEVEL_HIGH) ? 0 : -EINVAL;
179 179
180 switch (type) { 180 switch (type) {
@@ -193,11 +193,11 @@ static int rb532_set_type(unsigned int irq_nr, unsigned type)
193 193
194static struct irq_chip rc32434_irq_type = { 194static struct irq_chip rc32434_irq_type = {
195 .name = "RB532", 195 .name = "RB532",
196 .ack = rb532_disable_irq, 196 .irq_ack = rb532_disable_irq,
197 .mask = rb532_disable_irq, 197 .irq_mask = rb532_disable_irq,
198 .mask_ack = rb532_mask_and_ack_irq, 198 .irq_mask_ack = rb532_mask_and_ack_irq,
199 .unmask = rb532_enable_irq, 199 .irq_unmask = rb532_enable_irq,
200 .set_type = rb532_set_type, 200 .irq_set_type = rb532_set_type,
201}; 201};
202 202
203void __init arch_init_irq(void) 203void __init arch_init_irq(void)
@@ -207,8 +207,8 @@ void __init arch_init_irq(void)
207 pr_info("Initializing IRQ's: %d out of %d\n", RC32434_NR_IRQS, NR_IRQS); 207 pr_info("Initializing IRQ's: %d out of %d\n", RC32434_NR_IRQS, NR_IRQS);
208 208
209 for (i = 0; i < RC32434_NR_IRQS; i++) 209 for (i = 0; i < RC32434_NR_IRQS; i++)
210 set_irq_chip_and_handler(i, &rc32434_irq_type, 210 irq_set_chip_and_handler(i, &rc32434_irq_type,
211 handle_level_irq); 211 handle_level_irq);
212} 212}
213 213
214/* Main Interrupt dispatcher */ 214/* Main Interrupt dispatcher */
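rb532_set_type() above keeps its decision logic but now pulls the irq number out of irq_data and is wired up as .irq_set_type. A hedged sketch of that callback shape — the polarity register and GPIO arithmetic are invented; only the signature and the 0 / -EINVAL return convention follow the code above:

/*
 * Sketch of an irq_data-based set_type callback.  MYCHIP_GPIO_IRQ_BASE and
 * the polarity register are hypothetical.
 */
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/types.h>

#define MYCHIP_GPIO_IRQ_BASE	40

static void __iomem *mychip_polarity_reg;	/* assumed mapped elsewhere */

static int mychip_set_type(struct irq_data *d, unsigned int flow_type)
{
	unsigned int gpio = d->irq - MYCHIP_GPIO_IRQ_BASE;
	u32 pol = readl(mychip_polarity_reg);

	switch (flow_type) {
	case IRQ_TYPE_LEVEL_HIGH:
		writel(pol & ~(1u << gpio), mychip_polarity_reg);
		return 0;
	case IRQ_TYPE_LEVEL_LOW:
		writel(pol | (1u << gpio), mychip_polarity_reg);
		return 0;
	default:
		return -EINVAL;		/* trigger not supported */
	}
}

In the chip it is then hooked up as .irq_set_type = mychip_set_type, mirroring the rc32434_irq_type change above.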
diff --git a/arch/mips/sgi-ip22/ip22-int.c b/arch/mips/sgi-ip22/ip22-int.c
index 383f11d7f44..476423a0129 100644
--- a/arch/mips/sgi-ip22/ip22-int.c
+++ b/arch/mips/sgi-ip22/ip22-int.c
@@ -31,88 +31,80 @@ static char lc3msk_to_irqnr[256];
31 31
32extern int ip22_eisa_init(void); 32extern int ip22_eisa_init(void);
33 33
34static void enable_local0_irq(unsigned int irq) 34static void enable_local0_irq(struct irq_data *d)
35{ 35{
36 /* don't allow mappable interrupt to be enabled from setup_irq, 36 /* don't allow mappable interrupt to be enabled from setup_irq,
37 * we have our own way to do so */ 37 * we have our own way to do so */
38 if (irq != SGI_MAP_0_IRQ) 38 if (d->irq != SGI_MAP_0_IRQ)
39 sgint->imask0 |= (1 << (irq - SGINT_LOCAL0)); 39 sgint->imask0 |= (1 << (d->irq - SGINT_LOCAL0));
40} 40}
41 41
42static void disable_local0_irq(unsigned int irq) 42static void disable_local0_irq(struct irq_data *d)
43{ 43{
44 sgint->imask0 &= ~(1 << (irq - SGINT_LOCAL0)); 44 sgint->imask0 &= ~(1 << (d->irq - SGINT_LOCAL0));
45} 45}
46 46
47static struct irq_chip ip22_local0_irq_type = { 47static struct irq_chip ip22_local0_irq_type = {
48 .name = "IP22 local 0", 48 .name = "IP22 local 0",
49 .ack = disable_local0_irq, 49 .irq_mask = disable_local0_irq,
50 .mask = disable_local0_irq, 50 .irq_unmask = enable_local0_irq,
51 .mask_ack = disable_local0_irq,
52 .unmask = enable_local0_irq,
53}; 51};
54 52
55static void enable_local1_irq(unsigned int irq) 53static void enable_local1_irq(struct irq_data *d)
56{ 54{
57 /* don't allow mappable interrupt to be enabled from setup_irq, 55 /* don't allow mappable interrupt to be enabled from setup_irq,
58 * we have our own way to do so */ 56 * we have our own way to do so */
59 if (irq != SGI_MAP_1_IRQ) 57 if (d->irq != SGI_MAP_1_IRQ)
60 sgint->imask1 |= (1 << (irq - SGINT_LOCAL1)); 58 sgint->imask1 |= (1 << (d->irq - SGINT_LOCAL1));
61} 59}
62 60
63static void disable_local1_irq(unsigned int irq) 61static void disable_local1_irq(struct irq_data *d)
64{ 62{
65 sgint->imask1 &= ~(1 << (irq - SGINT_LOCAL1)); 63 sgint->imask1 &= ~(1 << (d->irq - SGINT_LOCAL1));
66} 64}
67 65
68static struct irq_chip ip22_local1_irq_type = { 66static struct irq_chip ip22_local1_irq_type = {
69 .name = "IP22 local 1", 67 .name = "IP22 local 1",
70 .ack = disable_local1_irq, 68 .irq_mask = disable_local1_irq,
71 .mask = disable_local1_irq, 69 .irq_unmask = enable_local1_irq,
72 .mask_ack = disable_local1_irq,
73 .unmask = enable_local1_irq,
74}; 70};
75 71
76static void enable_local2_irq(unsigned int irq) 72static void enable_local2_irq(struct irq_data *d)
77{ 73{
78 sgint->imask0 |= (1 << (SGI_MAP_0_IRQ - SGINT_LOCAL0)); 74 sgint->imask0 |= (1 << (SGI_MAP_0_IRQ - SGINT_LOCAL0));
79 sgint->cmeimask0 |= (1 << (irq - SGINT_LOCAL2)); 75 sgint->cmeimask0 |= (1 << (d->irq - SGINT_LOCAL2));
80} 76}
81 77
82static void disable_local2_irq(unsigned int irq) 78static void disable_local2_irq(struct irq_data *d)
83{ 79{
84 sgint->cmeimask0 &= ~(1 << (irq - SGINT_LOCAL2)); 80 sgint->cmeimask0 &= ~(1 << (d->irq - SGINT_LOCAL2));
85 if (!sgint->cmeimask0) 81 if (!sgint->cmeimask0)
86 sgint->imask0 &= ~(1 << (SGI_MAP_0_IRQ - SGINT_LOCAL0)); 82 sgint->imask0 &= ~(1 << (SGI_MAP_0_IRQ - SGINT_LOCAL0));
87} 83}
88 84
89static struct irq_chip ip22_local2_irq_type = { 85static struct irq_chip ip22_local2_irq_type = {
90 .name = "IP22 local 2", 86 .name = "IP22 local 2",
91 .ack = disable_local2_irq, 87 .irq_mask = disable_local2_irq,
92 .mask = disable_local2_irq, 88 .irq_unmask = enable_local2_irq,
93 .mask_ack = disable_local2_irq,
94 .unmask = enable_local2_irq,
95}; 89};
96 90
97static void enable_local3_irq(unsigned int irq) 91static void enable_local3_irq(struct irq_data *d)
98{ 92{
99 sgint->imask1 |= (1 << (SGI_MAP_1_IRQ - SGINT_LOCAL1)); 93 sgint->imask1 |= (1 << (SGI_MAP_1_IRQ - SGINT_LOCAL1));
100 sgint->cmeimask1 |= (1 << (irq - SGINT_LOCAL3)); 94 sgint->cmeimask1 |= (1 << (d->irq - SGINT_LOCAL3));
101} 95}
102 96
103static void disable_local3_irq(unsigned int irq) 97static void disable_local3_irq(struct irq_data *d)
104{ 98{
105 sgint->cmeimask1 &= ~(1 << (irq - SGINT_LOCAL3)); 99 sgint->cmeimask1 &= ~(1 << (d->irq - SGINT_LOCAL3));
106 if (!sgint->cmeimask1) 100 if (!sgint->cmeimask1)
107 sgint->imask1 &= ~(1 << (SGI_MAP_1_IRQ - SGINT_LOCAL1)); 101 sgint->imask1 &= ~(1 << (SGI_MAP_1_IRQ - SGINT_LOCAL1));
108} 102}
109 103
110static struct irq_chip ip22_local3_irq_type = { 104static struct irq_chip ip22_local3_irq_type = {
111 .name = "IP22 local 3", 105 .name = "IP22 local 3",
112 .ack = disable_local3_irq, 106 .irq_mask = disable_local3_irq,
113 .mask = disable_local3_irq, 107 .irq_unmask = enable_local3_irq,
114 .mask_ack = disable_local3_irq,
115 .unmask = enable_local3_irq,
116}; 108};
117 109
118static void indy_local0_irqdispatch(void) 110static void indy_local0_irqdispatch(void)
@@ -320,7 +312,7 @@ void __init arch_init_irq(void)
320 else 312 else
321 handler = &ip22_local3_irq_type; 313 handler = &ip22_local3_irq_type;
322 314
323 set_irq_chip_and_handler(i, handler, handle_level_irq); 315 irq_set_chip_and_handler(i, handler, handle_level_irq);
324 } 316 }
325 317
326 /* vector handler. this register the IRQ as non-sharable */ 318 /* vector handler. this register the IRQ as non-sharable */
diff --git a/arch/mips/sgi-ip27/Kconfig b/arch/mips/sgi-ip27/Kconfig
index 5e960ae9735..bc5e9769bb7 100644
--- a/arch/mips/sgi-ip27/Kconfig
+++ b/arch/mips/sgi-ip27/Kconfig
@@ -1,7 +1,7 @@
1#config SGI_SN0_XXL 1#config SGI_SN0_XXL
2# bool "IP27 XXL" 2# bool "IP27 XXL"
3# depends on SGI_IP27 3# depends on SGI_IP27
4# This options adds support for userspace processes upto 16TB size. 4# This options adds support for userspace processes up to 16TB size.
5# Normally the limit is just .5TB. 5# Normally the limit is just .5TB.
6 6
7choice 7choice
diff --git a/arch/mips/sgi-ip27/TODO b/arch/mips/sgi-ip27/TODO
index 19f1512c8f2..160857ff148 100644
--- a/arch/mips/sgi-ip27/TODO
+++ b/arch/mips/sgi-ip27/TODO
@@ -13,7 +13,7 @@ being invoked on all nodes in ip27-memory.c.
139. start_thread must turn off UX64 ... and define tlb_refill_debug. 139. start_thread must turn off UX64 ... and define tlb_refill_debug.
1410. Need a bad pmd table, bad pte table. __bad_pmd_table/__bad_pagetable 1410. Need a bad pmd table, bad pte table. __bad_pmd_table/__bad_pagetable
15does not agree with pgd_bad/pmd_bad. 15does not agree with pgd_bad/pmd_bad.
1611. All intrs (ip27_do_irq handlers) are targetted at cpu A on the node. 1611. All intrs (ip27_do_irq handlers) are targeted at cpu A on the node.
17This might need to change later. Only the timer intr is set up to be 17This might need to change later. Only the timer intr is set up to be
18received on both Cpu A and B. (ip27_do_irq()/bridge_startup()) 18received on both Cpu A and B. (ip27_do_irq()/bridge_startup())
1913. Cache flushing (specially the SMP version) has to be investigated. 1913. Cache flushing (specially the SMP version) has to be investigated.
diff --git a/arch/mips/sgi-ip27/ip27-init.c b/arch/mips/sgi-ip27/ip27-init.c
index 51d3a4f2d7e..923c080f77b 100644
--- a/arch/mips/sgi-ip27/ip27-init.c
+++ b/arch/mips/sgi-ip27/ip27-init.c
@@ -93,7 +93,7 @@ static void __cpuinit per_hub_init(cnodeid_t cnode)
93 93
94 /* 94 /*
95 * Some interrupts are reserved by hardware or by software convention. 95 * Some interrupts are reserved by hardware or by software convention.
96 * Mark these as reserved right away so they won't be used accidently 96 * Mark these as reserved right away so they won't be used accidentally
97 * later. 97 * later.
98 */ 98 */
99 for (i = 0; i <= BASE_PCI_IRQ; i++) { 99 for (i = 0; i <= BASE_PCI_IRQ; i++) {
diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c
index 6a123ea72de..0a04603d577 100644
--- a/arch/mips/sgi-ip27/ip27-irq.c
+++ b/arch/mips/sgi-ip27/ip27-irq.c
@@ -41,7 +41,7 @@
41 * Linux has a controller-independent x86 interrupt architecture. 41 * Linux has a controller-independent x86 interrupt architecture.
42 * every controller has a 'controller-template', that is used 42 * every controller has a 'controller-template', that is used
43 * by the main code to do the right thing. Each driver-visible 43 * by the main code to do the right thing. Each driver-visible
44 * interrupt source is transparently wired to the apropriate 44 * interrupt source is transparently wired to the appropriate
45 * controller. Thus drivers need not be aware of the 45 * controller. Thus drivers need not be aware of the
46 * interrupt-controller. 46 * interrupt-controller.
47 * 47 *
@@ -240,7 +240,7 @@ static int intr_disconnect_level(int cpu, int bit)
240} 240}
241 241
242/* Startup one of the (PCI ...) IRQs routes over a bridge. */ 242/* Startup one of the (PCI ...) IRQs routes over a bridge. */
243static unsigned int startup_bridge_irq(unsigned int irq) 243static unsigned int startup_bridge_irq(struct irq_data *d)
244{ 244{
245 struct bridge_controller *bc; 245 struct bridge_controller *bc;
246 bridgereg_t device; 246 bridgereg_t device;
@@ -248,16 +248,16 @@ static unsigned int startup_bridge_irq(unsigned int irq)
248 int pin, swlevel; 248 int pin, swlevel;
249 cpuid_t cpu; 249 cpuid_t cpu;
250 250
251 pin = SLOT_FROM_PCI_IRQ(irq); 251 pin = SLOT_FROM_PCI_IRQ(d->irq);
252 bc = IRQ_TO_BRIDGE(irq); 252 bc = IRQ_TO_BRIDGE(d->irq);
253 bridge = bc->base; 253 bridge = bc->base;
254 254
255 pr_debug("bridge_startup(): irq= 0x%x pin=%d\n", irq, pin); 255 pr_debug("bridge_startup(): irq= 0x%x pin=%d\n", d->irq, pin);
256 /* 256 /*
257 * "map" irq to a swlevel greater than 6 since the first 6 bits 257 * "map" irq to a swlevel greater than 6 since the first 6 bits
258 * of INT_PEND0 are taken 258 * of INT_PEND0 are taken
259 */ 259 */
260 swlevel = find_level(&cpu, irq); 260 swlevel = find_level(&cpu, d->irq);
261 bridge->b_int_addr[pin].addr = (0x20000 | swlevel | (bc->nasid << 8)); 261 bridge->b_int_addr[pin].addr = (0x20000 | swlevel | (bc->nasid << 8));
262 bridge->b_int_enable |= (1 << pin); 262 bridge->b_int_enable |= (1 << pin);
263 bridge->b_int_enable |= 0x7ffffe00; /* more stuff in int_enable */ 263 bridge->b_int_enable |= 0x7ffffe00; /* more stuff in int_enable */
@@ -288,58 +288,56 @@ static unsigned int startup_bridge_irq(unsigned int irq)
288} 288}
289 289
290/* Shutdown one of the (PCI ...) IRQs routes over a bridge. */ 290/* Shutdown one of the (PCI ...) IRQs routes over a bridge. */
291static void shutdown_bridge_irq(unsigned int irq) 291static void shutdown_bridge_irq(struct irq_data *d)
292{ 292{
293 struct bridge_controller *bc = IRQ_TO_BRIDGE(irq); 293 struct bridge_controller *bc = IRQ_TO_BRIDGE(d->irq);
294 bridge_t *bridge = bc->base; 294 bridge_t *bridge = bc->base;
295 int pin, swlevel; 295 int pin, swlevel;
296 cpuid_t cpu; 296 cpuid_t cpu;
297 297
298 pr_debug("bridge_shutdown: irq 0x%x\n", irq); 298 pr_debug("bridge_shutdown: irq 0x%x\n", d->irq);
299 pin = SLOT_FROM_PCI_IRQ(irq); 299 pin = SLOT_FROM_PCI_IRQ(d->irq);
300 300
301 /* 301 /*
302 * map irq to a swlevel greater than 6 since the first 6 bits 302 * map irq to a swlevel greater than 6 since the first 6 bits
303 * of INT_PEND0 are taken 303 * of INT_PEND0 are taken
304 */ 304 */
305 swlevel = find_level(&cpu, irq); 305 swlevel = find_level(&cpu, d->irq);
306 intr_disconnect_level(cpu, swlevel); 306 intr_disconnect_level(cpu, swlevel);
307 307
308 bridge->b_int_enable &= ~(1 << pin); 308 bridge->b_int_enable &= ~(1 << pin);
309 bridge->b_wid_tflush; 309 bridge->b_wid_tflush;
310} 310}
311 311
312static inline void enable_bridge_irq(unsigned int irq) 312static inline void enable_bridge_irq(struct irq_data *d)
313{ 313{
314 cpuid_t cpu; 314 cpuid_t cpu;
315 int swlevel; 315 int swlevel;
316 316
317 swlevel = find_level(&cpu, irq); /* Criminal offence */ 317 swlevel = find_level(&cpu, d->irq); /* Criminal offence */
318 intr_connect_level(cpu, swlevel); 318 intr_connect_level(cpu, swlevel);
319} 319}
320 320
321static inline void disable_bridge_irq(unsigned int irq) 321static inline void disable_bridge_irq(struct irq_data *d)
322{ 322{
323 cpuid_t cpu; 323 cpuid_t cpu;
324 int swlevel; 324 int swlevel;
325 325
326 swlevel = find_level(&cpu, irq); /* Criminal offence */ 326 swlevel = find_level(&cpu, d->irq); /* Criminal offence */
327 intr_disconnect_level(cpu, swlevel); 327 intr_disconnect_level(cpu, swlevel);
328} 328}
329 329
330static struct irq_chip bridge_irq_type = { 330static struct irq_chip bridge_irq_type = {
331 .name = "bridge", 331 .name = "bridge",
332 .startup = startup_bridge_irq, 332 .irq_startup = startup_bridge_irq,
333 .shutdown = shutdown_bridge_irq, 333 .irq_shutdown = shutdown_bridge_irq,
334 .ack = disable_bridge_irq, 334 .irq_mask = disable_bridge_irq,
335 .mask = disable_bridge_irq, 335 .irq_unmask = enable_bridge_irq,
336 .mask_ack = disable_bridge_irq,
337 .unmask = enable_bridge_irq,
338}; 336};
339 337
340void __devinit register_bridge_irq(unsigned int irq) 338void __devinit register_bridge_irq(unsigned int irq)
341{ 339{
342 set_irq_chip_and_handler(irq, &bridge_irq_type, handle_level_irq); 340 irq_set_chip_and_handler(irq, &bridge_irq_type, handle_level_irq);
343} 341}
344 342
345int __devinit request_bridge_irq(struct bridge_controller *bc) 343int __devinit request_bridge_irq(struct bridge_controller *bc)
diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c
index 3cac88382d4..8d0d2690e96 100644
--- a/arch/mips/sgi-ip27/ip27-timer.c
+++ b/arch/mips/sgi-ip27/ip27-timer.c
@@ -36,21 +36,18 @@
36#include <asm/sn/sn0/hubio.h> 36#include <asm/sn/sn0/hubio.h>
37#include <asm/pci/bridge.h> 37#include <asm/pci/bridge.h>
38 38
39static void enable_rt_irq(unsigned int irq) 39static void enable_rt_irq(struct irq_data *d)
40{ 40{
41} 41}
42 42
43static void disable_rt_irq(unsigned int irq) 43static void disable_rt_irq(struct irq_data *d)
44{ 44{
45} 45}
46 46
47static struct irq_chip rt_irq_type = { 47static struct irq_chip rt_irq_type = {
48 .name = "SN HUB RT timer", 48 .name = "SN HUB RT timer",
49 .ack = disable_rt_irq, 49 .irq_mask = disable_rt_irq,
50 .mask = disable_rt_irq, 50 .irq_unmask = enable_rt_irq,
51 .mask_ack = disable_rt_irq,
52 .unmask = enable_rt_irq,
53 .eoi = enable_rt_irq,
54}; 51};
55 52
56static int rt_next_event(unsigned long delta, struct clock_event_device *evt) 53static int rt_next_event(unsigned long delta, struct clock_event_device *evt)
@@ -156,7 +153,7 @@ static void __init hub_rt_clock_event_global_init(void)
156 panic("Allocation of irq number for timer failed"); 153 panic("Allocation of irq number for timer failed");
157 } while (xchg(&rt_timer_irq, irq)); 154 } while (xchg(&rt_timer_irq, irq));
158 155
159 set_irq_chip_and_handler(irq, &rt_irq_type, handle_percpu_irq); 156 irq_set_chip_and_handler(irq, &rt_irq_type, handle_percpu_irq);
160 setup_irq(irq, &hub_rt_irqaction); 157 setup_irq(irq, &hub_rt_irqaction);
161} 158}
162 159
diff --git a/arch/mips/sgi-ip32/ip32-irq.c b/arch/mips/sgi-ip32/ip32-irq.c
index eb40824b172..c65ea76d56c 100644
--- a/arch/mips/sgi-ip32/ip32-irq.c
+++ b/arch/mips/sgi-ip32/ip32-irq.c
@@ -130,70 +130,48 @@ static struct irqaction cpuerr_irq = {
130 130
131static uint64_t crime_mask; 131static uint64_t crime_mask;
132 132
133static inline void crime_enable_irq(unsigned int irq) 133static inline void crime_enable_irq(struct irq_data *d)
134{ 134{
135 unsigned int bit = irq - CRIME_IRQ_BASE; 135 unsigned int bit = d->irq - CRIME_IRQ_BASE;
136 136
137 crime_mask |= 1 << bit; 137 crime_mask |= 1 << bit;
138 crime->imask = crime_mask; 138 crime->imask = crime_mask;
139} 139}
140 140
141static inline void crime_disable_irq(unsigned int irq) 141static inline void crime_disable_irq(struct irq_data *d)
142{ 142{
143 unsigned int bit = irq - CRIME_IRQ_BASE; 143 unsigned int bit = d->irq - CRIME_IRQ_BASE;
144 144
145 crime_mask &= ~(1 << bit); 145 crime_mask &= ~(1 << bit);
146 crime->imask = crime_mask; 146 crime->imask = crime_mask;
147 flush_crime_bus(); 147 flush_crime_bus();
148} 148}
149 149
150static void crime_level_mask_and_ack_irq(unsigned int irq)
151{
152 crime_disable_irq(irq);
153}
154
155static void crime_level_end_irq(unsigned int irq)
156{
157 if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
158 crime_enable_irq(irq);
159}
160
161static struct irq_chip crime_level_interrupt = { 150static struct irq_chip crime_level_interrupt = {
162 .name = "IP32 CRIME", 151 .name = "IP32 CRIME",
163 .ack = crime_level_mask_and_ack_irq, 152 .irq_mask = crime_disable_irq,
164 .mask = crime_disable_irq, 153 .irq_unmask = crime_enable_irq,
165 .mask_ack = crime_level_mask_and_ack_irq,
166 .unmask = crime_enable_irq,
167 .end = crime_level_end_irq,
168}; 154};
169 155
170static void crime_edge_mask_and_ack_irq(unsigned int irq) 156static void crime_edge_mask_and_ack_irq(struct irq_data *d)
171{ 157{
172 unsigned int bit = irq - CRIME_IRQ_BASE; 158 unsigned int bit = d->irq - CRIME_IRQ_BASE;
173 uint64_t crime_int; 159 uint64_t crime_int;
174 160
175 /* Edge triggered interrupts must be cleared. */ 161 /* Edge triggered interrupts must be cleared. */
176
177 crime_int = crime->hard_int; 162 crime_int = crime->hard_int;
178 crime_int &= ~(1 << bit); 163 crime_int &= ~(1 << bit);
179 crime->hard_int = crime_int; 164 crime->hard_int = crime_int;
180 165
181 crime_disable_irq(irq); 166 crime_disable_irq(d);
182}
183
184static void crime_edge_end_irq(unsigned int irq)
185{
186 if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
187 crime_enable_irq(irq);
188} 167}
189 168
190static struct irq_chip crime_edge_interrupt = { 169static struct irq_chip crime_edge_interrupt = {
191 .name = "IP32 CRIME", 170 .name = "IP32 CRIME",
192 .ack = crime_edge_mask_and_ack_irq, 171 .irq_ack = crime_edge_mask_and_ack_irq,
193 .mask = crime_disable_irq, 172 .irq_mask = crime_disable_irq,
194 .mask_ack = crime_edge_mask_and_ack_irq, 173 .irq_mask_ack = crime_edge_mask_and_ack_irq,
195 .unmask = crime_enable_irq, 174 .irq_unmask = crime_enable_irq,
196 .end = crime_edge_end_irq,
197}; 175};
198 176
199/* 177/*
@@ -204,37 +182,28 @@ static struct irq_chip crime_edge_interrupt = {
204 182
205static unsigned long macepci_mask; 183static unsigned long macepci_mask;
206 184
207static void enable_macepci_irq(unsigned int irq) 185static void enable_macepci_irq(struct irq_data *d)
208{ 186{
209 macepci_mask |= MACEPCI_CONTROL_INT(irq - MACEPCI_SCSI0_IRQ); 187 macepci_mask |= MACEPCI_CONTROL_INT(d->irq - MACEPCI_SCSI0_IRQ);
210 mace->pci.control = macepci_mask; 188 mace->pci.control = macepci_mask;
211 crime_mask |= 1 << (irq - CRIME_IRQ_BASE); 189 crime_mask |= 1 << (d->irq - CRIME_IRQ_BASE);
212 crime->imask = crime_mask; 190 crime->imask = crime_mask;
213} 191}
214 192
215static void disable_macepci_irq(unsigned int irq) 193static void disable_macepci_irq(struct irq_data *d)
216{ 194{
217 crime_mask &= ~(1 << (irq - CRIME_IRQ_BASE)); 195 crime_mask &= ~(1 << (d->irq - CRIME_IRQ_BASE));
218 crime->imask = crime_mask; 196 crime->imask = crime_mask;
219 flush_crime_bus(); 197 flush_crime_bus();
220 macepci_mask &= ~MACEPCI_CONTROL_INT(irq - MACEPCI_SCSI0_IRQ); 198 macepci_mask &= ~MACEPCI_CONTROL_INT(d->irq - MACEPCI_SCSI0_IRQ);
221 mace->pci.control = macepci_mask; 199 mace->pci.control = macepci_mask;
222 flush_mace_bus(); 200 flush_mace_bus();
223} 201}
224 202
225static void end_macepci_irq(unsigned int irq)
226{
227 if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
228 enable_macepci_irq(irq);
229}
230
231static struct irq_chip ip32_macepci_interrupt = { 203static struct irq_chip ip32_macepci_interrupt = {
232 .name = "IP32 MACE PCI", 204 .name = "IP32 MACE PCI",
233 .ack = disable_macepci_irq, 205 .irq_mask = disable_macepci_irq,
234 .mask = disable_macepci_irq, 206 .irq_unmask = enable_macepci_irq,
235 .mask_ack = disable_macepci_irq,
236 .unmask = enable_macepci_irq,
237 .end = end_macepci_irq,
238}; 207};
239 208
240/* This is used for MACE ISA interrupts. That means bits 4-6 in the 209/* This is used for MACE ISA interrupts. That means bits 4-6 in the
@@ -276,13 +245,13 @@ static struct irq_chip ip32_macepci_interrupt = {
276 245
277static unsigned long maceisa_mask; 246static unsigned long maceisa_mask;
278 247
279static void enable_maceisa_irq(unsigned int irq) 248static void enable_maceisa_irq(struct irq_data *d)
280{ 249{
281 unsigned int crime_int = 0; 250 unsigned int crime_int = 0;
282 251
283 pr_debug("maceisa enable: %u\n", irq); 252 pr_debug("maceisa enable: %u\n", d->irq);
284 253
285 switch (irq) { 254 switch (d->irq) {
286 case MACEISA_AUDIO_SW_IRQ ... MACEISA_AUDIO3_MERR_IRQ: 255 case MACEISA_AUDIO_SW_IRQ ... MACEISA_AUDIO3_MERR_IRQ:
287 crime_int = MACE_AUDIO_INT; 256 crime_int = MACE_AUDIO_INT;
288 break; 257 break;
@@ -296,15 +265,15 @@ static void enable_maceisa_irq(unsigned int irq)
296 pr_debug("crime_int %08x enabled\n", crime_int); 265 pr_debug("crime_int %08x enabled\n", crime_int);
297 crime_mask |= crime_int; 266 crime_mask |= crime_int;
298 crime->imask = crime_mask; 267 crime->imask = crime_mask;
299 maceisa_mask |= 1 << (irq - MACEISA_AUDIO_SW_IRQ); 268 maceisa_mask |= 1 << (d->irq - MACEISA_AUDIO_SW_IRQ);
300 mace->perif.ctrl.imask = maceisa_mask; 269 mace->perif.ctrl.imask = maceisa_mask;
301} 270}
302 271
303static void disable_maceisa_irq(unsigned int irq) 272static void disable_maceisa_irq(struct irq_data *d)
304{ 273{
305 unsigned int crime_int = 0; 274 unsigned int crime_int = 0;
306 275
307 maceisa_mask &= ~(1 << (irq - MACEISA_AUDIO_SW_IRQ)); 276 maceisa_mask &= ~(1 << (d->irq - MACEISA_AUDIO_SW_IRQ));
308 if (!(maceisa_mask & MACEISA_AUDIO_INT)) 277 if (!(maceisa_mask & MACEISA_AUDIO_INT))
309 crime_int |= MACE_AUDIO_INT; 278 crime_int |= MACE_AUDIO_INT;
310 if (!(maceisa_mask & MACEISA_MISC_INT)) 279 if (!(maceisa_mask & MACEISA_MISC_INT))
@@ -318,76 +287,57 @@ static void disable_maceisa_irq(unsigned int irq)
318 flush_mace_bus(); 287 flush_mace_bus();
319} 288}
320 289
321static void mask_and_ack_maceisa_irq(unsigned int irq) 290static void mask_and_ack_maceisa_irq(struct irq_data *d)
322{ 291{
323 unsigned long mace_int; 292 unsigned long mace_int;
324 293
325 /* edge triggered */ 294 /* edge triggered */
326 mace_int = mace->perif.ctrl.istat; 295 mace_int = mace->perif.ctrl.istat;
327 mace_int &= ~(1 << (irq - MACEISA_AUDIO_SW_IRQ)); 296 mace_int &= ~(1 << (d->irq - MACEISA_AUDIO_SW_IRQ));
328 mace->perif.ctrl.istat = mace_int; 297 mace->perif.ctrl.istat = mace_int;
329 298
330 disable_maceisa_irq(irq); 299 disable_maceisa_irq(d);
331}
332
333static void end_maceisa_irq(unsigned irq)
334{
335 if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
336 enable_maceisa_irq(irq);
337} 300}
338 301
339static struct irq_chip ip32_maceisa_level_interrupt = { 302static struct irq_chip ip32_maceisa_level_interrupt = {
340 .name = "IP32 MACE ISA", 303 .name = "IP32 MACE ISA",
341 .ack = disable_maceisa_irq, 304 .irq_mask = disable_maceisa_irq,
342 .mask = disable_maceisa_irq, 305 .irq_unmask = enable_maceisa_irq,
343 .mask_ack = disable_maceisa_irq,
344 .unmask = enable_maceisa_irq,
345 .end = end_maceisa_irq,
346}; 306};
347 307
348static struct irq_chip ip32_maceisa_edge_interrupt = { 308static struct irq_chip ip32_maceisa_edge_interrupt = {
349 .name = "IP32 MACE ISA", 309 .name = "IP32 MACE ISA",
350 .ack = mask_and_ack_maceisa_irq, 310 .irq_ack = mask_and_ack_maceisa_irq,
351 .mask = disable_maceisa_irq, 311 .irq_mask = disable_maceisa_irq,
352 .mask_ack = mask_and_ack_maceisa_irq, 312 .irq_mask_ack = mask_and_ack_maceisa_irq,
353 .unmask = enable_maceisa_irq, 313 .irq_unmask = enable_maceisa_irq,
354 .end = end_maceisa_irq,
355}; 314};
356 315
357/* This is used for regular non-ISA, non-PCI MACE interrupts. That means 316/* This is used for regular non-ISA, non-PCI MACE interrupts. That means
358 * bits 0-3 and 7 in the CRIME register. 317 * bits 0-3 and 7 in the CRIME register.
359 */ 318 */
360 319
361static void enable_mace_irq(unsigned int irq) 320static void enable_mace_irq(struct irq_data *d)
362{ 321{
363 unsigned int bit = irq - CRIME_IRQ_BASE; 322 unsigned int bit = d->irq - CRIME_IRQ_BASE;
364 323
365 crime_mask |= (1 << bit); 324 crime_mask |= (1 << bit);
366 crime->imask = crime_mask; 325 crime->imask = crime_mask;
367} 326}
368 327
369static void disable_mace_irq(unsigned int irq) 328static void disable_mace_irq(struct irq_data *d)
370{ 329{
371 unsigned int bit = irq - CRIME_IRQ_BASE; 330 unsigned int bit = d->irq - CRIME_IRQ_BASE;
372 331
373 crime_mask &= ~(1 << bit); 332 crime_mask &= ~(1 << bit);
374 crime->imask = crime_mask; 333 crime->imask = crime_mask;
375 flush_crime_bus(); 334 flush_crime_bus();
376} 335}
377 336
378static void end_mace_irq(unsigned int irq)
379{
380 if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
381 enable_mace_irq(irq);
382}
383
384static struct irq_chip ip32_mace_interrupt = { 337static struct irq_chip ip32_mace_interrupt = {
385 .name = "IP32 MACE", 338 .name = "IP32 MACE",
386 .ack = disable_mace_irq, 339 .irq_mask = disable_mace_irq,
387 .mask = disable_mace_irq, 340 .irq_unmask = enable_mace_irq,
388 .mask_ack = disable_mace_irq,
389 .unmask = enable_mace_irq,
390 .end = end_mace_irq,
391}; 341};
392 342
393static void ip32_unknown_interrupt(void) 343static void ip32_unknown_interrupt(void)
@@ -501,43 +451,51 @@ void __init arch_init_irq(void)
501 for (irq = CRIME_IRQ_BASE; irq <= IP32_IRQ_MAX; irq++) { 451 for (irq = CRIME_IRQ_BASE; irq <= IP32_IRQ_MAX; irq++) {
502 switch (irq) { 452 switch (irq) {
503 case MACE_VID_IN1_IRQ ... MACE_PCI_BRIDGE_IRQ: 453 case MACE_VID_IN1_IRQ ... MACE_PCI_BRIDGE_IRQ:
504 set_irq_chip_and_handler_name(irq,&ip32_mace_interrupt, 454 irq_set_chip_and_handler_name(irq,
505 handle_level_irq, "level"); 455 &ip32_mace_interrupt,
456 handle_level_irq,
457 "level");
506 break; 458 break;
507 459
508 case MACEPCI_SCSI0_IRQ ... MACEPCI_SHARED2_IRQ: 460 case MACEPCI_SCSI0_IRQ ... MACEPCI_SHARED2_IRQ:
509 set_irq_chip_and_handler_name(irq, 461 irq_set_chip_and_handler_name(irq,
510 &ip32_macepci_interrupt, handle_level_irq, 462 &ip32_macepci_interrupt,
511 "level"); 463 handle_level_irq,
464 "level");
512 break; 465 break;
513 466
514 case CRIME_CPUERR_IRQ: 467 case CRIME_CPUERR_IRQ:
515 case CRIME_MEMERR_IRQ: 468 case CRIME_MEMERR_IRQ:
516 set_irq_chip_and_handler_name(irq, 469 irq_set_chip_and_handler_name(irq,
517 &crime_level_interrupt, handle_level_irq, 470 &crime_level_interrupt,
518 "level"); 471 handle_level_irq,
472 "level");
519 break; 473 break;
520 474
521 case CRIME_GBE0_IRQ ... CRIME_GBE3_IRQ: 475 case CRIME_GBE0_IRQ ... CRIME_GBE3_IRQ:
522 case CRIME_RE_EMPTY_E_IRQ ... CRIME_RE_IDLE_E_IRQ: 476 case CRIME_RE_EMPTY_E_IRQ ... CRIME_RE_IDLE_E_IRQ:
523 case CRIME_SOFT0_IRQ ... CRIME_SOFT2_IRQ: 477 case CRIME_SOFT0_IRQ ... CRIME_SOFT2_IRQ:
524 case CRIME_VICE_IRQ: 478 case CRIME_VICE_IRQ:
525 set_irq_chip_and_handler_name(irq, 479 irq_set_chip_and_handler_name(irq,
526 &crime_edge_interrupt, handle_edge_irq, "edge"); 480 &crime_edge_interrupt,
481 handle_edge_irq,
482 "edge");
527 break; 483 break;
528 484
529 case MACEISA_PARALLEL_IRQ: 485 case MACEISA_PARALLEL_IRQ:
530 case MACEISA_SERIAL1_TDMAPR_IRQ: 486 case MACEISA_SERIAL1_TDMAPR_IRQ:
531 case MACEISA_SERIAL2_TDMAPR_IRQ: 487 case MACEISA_SERIAL2_TDMAPR_IRQ:
532 set_irq_chip_and_handler_name(irq, 488 irq_set_chip_and_handler_name(irq,
533 &ip32_maceisa_edge_interrupt, handle_edge_irq, 489 &ip32_maceisa_edge_interrupt,
534 "edge"); 490 handle_edge_irq,
491 "edge");
535 break; 492 break;
536 493
537 default: 494 default:
538 set_irq_chip_and_handler_name(irq, 495 irq_set_chip_and_handler_name(irq,
539 &ip32_maceisa_level_interrupt, handle_level_irq, 496 &ip32_maceisa_level_interrupt,
540 "level"); 497 handle_level_irq,
498 "level");
541 break; 499 break;
542 } 500 }
543 } 501 }
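The IP32 hunks above also delete the *_end_irq helpers: with genirq flow handlers, handle_level_irq() masks the line around the action and unmasks it on return, and handle_edge_irq() acks on entry, so the old "re-enable unless IRQ_DISABLED | IRQ_INPROGRESS" boilerplate needs no replacement. A hedged sketch of how the two flavours end up being registered — the stub callbacks and the irq numbers are placeholders:

/*
 * Sketch: level vs. edge registration after the conversion.  The stub
 * callbacks and the irq numbers 10/11 are hypothetical.
 */
#include <linux/init.h>
#include <linux/irq.h>

static void mychip_mask(struct irq_data *d)	{ /* mask in hardware */ }
static void mychip_unmask(struct irq_data *d)	{ /* unmask in hardware */ }
static void mychip_ack(struct irq_data *d)	{ /* clear the latched edge */ }

static void mychip_mask_ack(struct irq_data *d)
{
	mychip_mask(d);
	mychip_ack(d);
}

static struct irq_chip mychip_level = {
	.name		= "MYCHIP level",
	.irq_mask	= mychip_mask,
	.irq_unmask	= mychip_unmask,
};

static struct irq_chip mychip_edge = {
	.name		= "MYCHIP edge",
	.irq_ack	= mychip_ack,
	.irq_mask	= mychip_mask,
	.irq_mask_ack	= mychip_mask_ack,
	.irq_unmask	= mychip_unmask,
};

static void __init mychip_init_irqs(void)
{
	irq_set_chip_and_handler_name(10, &mychip_level,
				      handle_level_irq, "level");
	irq_set_chip_and_handler_name(11, &mychip_edge,
				      handle_edge_irq, "edge");
}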
diff --git a/arch/mips/sibyte/bcm1480/irq.c b/arch/mips/sibyte/bcm1480/irq.c
index 044bbe462c2..09740d60e18 100644
--- a/arch/mips/sibyte/bcm1480/irq.c
+++ b/arch/mips/sibyte/bcm1480/irq.c
@@ -44,31 +44,10 @@
44 * for interrupt lines 44 * for interrupt lines
45 */ 45 */
46 46
47
48static void end_bcm1480_irq(unsigned int irq);
49static void enable_bcm1480_irq(unsigned int irq);
50static void disable_bcm1480_irq(unsigned int irq);
51static void ack_bcm1480_irq(unsigned int irq);
52#ifdef CONFIG_SMP
53static int bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask);
54#endif
55
56#ifdef CONFIG_PCI 47#ifdef CONFIG_PCI
57extern unsigned long ht_eoi_space; 48extern unsigned long ht_eoi_space;
58#endif 49#endif
59 50
60static struct irq_chip bcm1480_irq_type = {
61 .name = "BCM1480-IMR",
62 .ack = ack_bcm1480_irq,
63 .mask = disable_bcm1480_irq,
64 .mask_ack = ack_bcm1480_irq,
65 .unmask = enable_bcm1480_irq,
66 .end = end_bcm1480_irq,
67#ifdef CONFIG_SMP
68 .set_affinity = bcm1480_set_affinity
69#endif
70};
71
72/* Store the CPU id (not the logical number) */ 51/* Store the CPU id (not the logical number) */
73int bcm1480_irq_owner[BCM1480_NR_IRQS]; 52int bcm1480_irq_owner[BCM1480_NR_IRQS];
74 53
@@ -109,12 +88,13 @@ void bcm1480_unmask_irq(int cpu, int irq)
109} 88}
110 89
111#ifdef CONFIG_SMP 90#ifdef CONFIG_SMP
112static int bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask) 91static int bcm1480_set_affinity(struct irq_data *d, const struct cpumask *mask,
92 bool force)
113{ 93{
94 unsigned int irq_dirty, irq = d->irq;
114 int i = 0, old_cpu, cpu, int_on, k; 95 int i = 0, old_cpu, cpu, int_on, k;
115 u64 cur_ints; 96 u64 cur_ints;
116 unsigned long flags; 97 unsigned long flags;
117 unsigned int irq_dirty;
118 98
119 i = cpumask_first(mask); 99 i = cpumask_first(mask);
120 100
@@ -156,21 +136,25 @@ static int bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask)
156 136
157/*****************************************************************************/ 137/*****************************************************************************/
158 138
159static void disable_bcm1480_irq(unsigned int irq) 139static void disable_bcm1480_irq(struct irq_data *d)
160{ 140{
141 unsigned int irq = d->irq;
142
161 bcm1480_mask_irq(bcm1480_irq_owner[irq], irq); 143 bcm1480_mask_irq(bcm1480_irq_owner[irq], irq);
162} 144}
163 145
164static void enable_bcm1480_irq(unsigned int irq) 146static void enable_bcm1480_irq(struct irq_data *d)
165{ 147{
148 unsigned int irq = d->irq;
149
166 bcm1480_unmask_irq(bcm1480_irq_owner[irq], irq); 150 bcm1480_unmask_irq(bcm1480_irq_owner[irq], irq);
167} 151}
168 152
169 153
170static void ack_bcm1480_irq(unsigned int irq) 154static void ack_bcm1480_irq(struct irq_data *d)
171{ 155{
156 unsigned int irq_dirty, irq = d->irq;
172 u64 pending; 157 u64 pending;
173 unsigned int irq_dirty;
174 int k; 158 int k;
175 159
176 /* 160 /*
@@ -217,21 +201,23 @@ static void ack_bcm1480_irq(unsigned int irq)
217 bcm1480_mask_irq(bcm1480_irq_owner[irq], irq); 201 bcm1480_mask_irq(bcm1480_irq_owner[irq], irq);
218} 202}
219 203
220 204static struct irq_chip bcm1480_irq_type = {
221static void end_bcm1480_irq(unsigned int irq) 205 .name = "BCM1480-IMR",
222{ 206 .irq_mask_ack = ack_bcm1480_irq,
223 if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) { 207 .irq_mask = disable_bcm1480_irq,
224 bcm1480_unmask_irq(bcm1480_irq_owner[irq], irq); 208 .irq_unmask = enable_bcm1480_irq,
225 } 209#ifdef CONFIG_SMP
226} 210 .irq_set_affinity = bcm1480_set_affinity
227 211#endif
212};
228 213
229void __init init_bcm1480_irqs(void) 214void __init init_bcm1480_irqs(void)
230{ 215{
231 int i; 216 int i;
232 217
233 for (i = 0; i < BCM1480_NR_IRQS; i++) { 218 for (i = 0; i < BCM1480_NR_IRQS; i++) {
234 set_irq_chip_and_handler(i, &bcm1480_irq_type, handle_level_irq); 219 irq_set_chip_and_handler(i, &bcm1480_irq_type,
220 handle_level_irq);
235 bcm1480_irq_owner[i] = 0; 221 bcm1480_irq_owner[i] = 0;
236 } 222 }
237} 223}
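Besides the rename from .set_affinity to .irq_set_affinity, the callback gains a third force argument and, like everything else, receives irq_data. A hedged sketch of the new shape — the routing helper, lock and ownership table are stand-ins for what bcm1480_set_affinity() really does with the IMR registers:

/*
 * Sketch of a new-style affinity callback.  mychip_route_to_cpu() and
 * mychip_irq_owner[] are invented; the signature and the cpumask_first()
 * selection match the converted sibyte code above.
 */
#include <linux/cpumask.h>
#include <linux/irq.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define MYCHIP_NR_IRQS	64

static int mychip_irq_owner[MYCHIP_NR_IRQS];	/* CPU each source targets */
static DEFINE_RAW_SPINLOCK(mychip_route_lock);

static void mychip_route_to_cpu(unsigned int irq, int cpu)
{
	/* hypothetical hardware reroute; the real code moves IMR bits */
}

static int mychip_set_affinity(struct irq_data *d, const struct cpumask *mask,
			       bool force)
{
	unsigned int irq = d->irq;
	unsigned long flags;
	int cpu = cpumask_first(mask);	/* route to the first requested CPU */

	raw_spin_lock_irqsave(&mychip_route_lock, flags);
	mychip_route_to_cpu(irq, cpu);
	mychip_irq_owner[irq] = cpu;
	raw_spin_unlock_irqrestore(&mychip_route_lock, flags);

	return 0;
}

In the chip this is wired up under #ifdef CONFIG_SMP as .irq_set_affinity = mychip_set_affinity, exactly as in the BCM1480 and SB1250 hunks.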
diff --git a/arch/mips/sibyte/sb1250/irq.c b/arch/mips/sibyte/sb1250/irq.c
index 12ac04a658e..be4460a5f6a 100644
--- a/arch/mips/sibyte/sb1250/irq.c
+++ b/arch/mips/sibyte/sb1250/irq.c
@@ -43,31 +43,10 @@
43 * for interrupt lines 43 * for interrupt lines
44 */ 44 */
45 45
46
47static void end_sb1250_irq(unsigned int irq);
48static void enable_sb1250_irq(unsigned int irq);
49static void disable_sb1250_irq(unsigned int irq);
50static void ack_sb1250_irq(unsigned int irq);
51#ifdef CONFIG_SMP
52static int sb1250_set_affinity(unsigned int irq, const struct cpumask *mask);
53#endif
54
55#ifdef CONFIG_SIBYTE_HAS_LDT 46#ifdef CONFIG_SIBYTE_HAS_LDT
56extern unsigned long ldt_eoi_space; 47extern unsigned long ldt_eoi_space;
57#endif 48#endif
58 49
59static struct irq_chip sb1250_irq_type = {
60 .name = "SB1250-IMR",
61 .ack = ack_sb1250_irq,
62 .mask = disable_sb1250_irq,
63 .mask_ack = ack_sb1250_irq,
64 .unmask = enable_sb1250_irq,
65 .end = end_sb1250_irq,
66#ifdef CONFIG_SMP
67 .set_affinity = sb1250_set_affinity
68#endif
69};
70
71/* Store the CPU id (not the logical number) */ 50/* Store the CPU id (not the logical number) */
72int sb1250_irq_owner[SB1250_NR_IRQS]; 51int sb1250_irq_owner[SB1250_NR_IRQS];
73 52
@@ -102,9 +81,11 @@ void sb1250_unmask_irq(int cpu, int irq)
102} 81}
103 82
104#ifdef CONFIG_SMP 83#ifdef CONFIG_SMP
105static int sb1250_set_affinity(unsigned int irq, const struct cpumask *mask) 84static int sb1250_set_affinity(struct irq_data *d, const struct cpumask *mask,
85 bool force)
106{ 86{
107 int i = 0, old_cpu, cpu, int_on; 87 int i = 0, old_cpu, cpu, int_on;
88 unsigned int irq = d->irq;
108 u64 cur_ints; 89 u64 cur_ints;
109 unsigned long flags; 90 unsigned long flags;
110 91
@@ -142,21 +123,17 @@ static int sb1250_set_affinity(unsigned int irq, const struct cpumask *mask)
142} 123}
143#endif 124#endif
144 125
145/*****************************************************************************/ 126static void enable_sb1250_irq(struct irq_data *d)
146
147static void disable_sb1250_irq(unsigned int irq)
148{ 127{
149 sb1250_mask_irq(sb1250_irq_owner[irq], irq); 128 unsigned int irq = d->irq;
150}
151 129
152static void enable_sb1250_irq(unsigned int irq)
153{
154 sb1250_unmask_irq(sb1250_irq_owner[irq], irq); 130 sb1250_unmask_irq(sb1250_irq_owner[irq], irq);
155} 131}
156 132
157 133
158static void ack_sb1250_irq(unsigned int irq) 134static void ack_sb1250_irq(struct irq_data *d)
159{ 135{
136 unsigned int irq = d->irq;
160#ifdef CONFIG_SIBYTE_HAS_LDT 137#ifdef CONFIG_SIBYTE_HAS_LDT
161 u64 pending; 138 u64 pending;
162 139
@@ -199,21 +176,22 @@ static void ack_sb1250_irq(unsigned int irq)
199 sb1250_mask_irq(sb1250_irq_owner[irq], irq); 176 sb1250_mask_irq(sb1250_irq_owner[irq], irq);
200} 177}
201 178
202 179static struct irq_chip sb1250_irq_type = {
203static void end_sb1250_irq(unsigned int irq) 180 .name = "SB1250-IMR",
204{ 181 .irq_mask_ack = ack_sb1250_irq,
205 if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) { 182 .irq_unmask = enable_sb1250_irq,
206 sb1250_unmask_irq(sb1250_irq_owner[irq], irq); 183#ifdef CONFIG_SMP
207 } 184 .irq_set_affinity = sb1250_set_affinity
208} 185#endif
209 186};
210 187
211void __init init_sb1250_irqs(void) 188void __init init_sb1250_irqs(void)
212{ 189{
213 int i; 190 int i;
214 191
215 for (i = 0; i < SB1250_NR_IRQS; i++) { 192 for (i = 0; i < SB1250_NR_IRQS; i++) {
216 set_irq_chip_and_handler(i, &sb1250_irq_type, handle_level_irq); 193 irq_set_chip_and_handler(i, &sb1250_irq_type,
194 handle_level_irq);
217 sb1250_irq_owner[i] = 0; 195 sb1250_irq_owner[i] = 0;
218 } 196 }
219} 197}
diff --git a/arch/mips/sni/a20r.c b/arch/mips/sni/a20r.c
index bbe7187879f..c48194c3073 100644
--- a/arch/mips/sni/a20r.c
+++ b/arch/mips/sni/a20r.c
@@ -168,33 +168,22 @@ static u32 a20r_ack_hwint(void)
168 return status; 168 return status;
169} 169}
170 170
171static inline void unmask_a20r_irq(unsigned int irq) 171static inline void unmask_a20r_irq(struct irq_data *d)
172{ 172{
173 set_c0_status(0x100 << (irq - SNI_A20R_IRQ_BASE)); 173 set_c0_status(0x100 << (d->irq - SNI_A20R_IRQ_BASE));
174 irq_enable_hazard(); 174 irq_enable_hazard();
175} 175}
176 176
177static inline void mask_a20r_irq(unsigned int irq) 177static inline void mask_a20r_irq(struct irq_data *d)
178{ 178{
179 clear_c0_status(0x100 << (irq - SNI_A20R_IRQ_BASE)); 179 clear_c0_status(0x100 << (d->irq - SNI_A20R_IRQ_BASE));
180 irq_disable_hazard(); 180 irq_disable_hazard();
181} 181}
182 182
183static void end_a20r_irq(unsigned int irq)
184{
185 if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
186 a20r_ack_hwint();
187 unmask_a20r_irq(irq);
188 }
189}
190
191static struct irq_chip a20r_irq_type = { 183static struct irq_chip a20r_irq_type = {
192 .name = "A20R", 184 .name = "A20R",
193 .ack = mask_a20r_irq, 185 .irq_mask = mask_a20r_irq,
194 .mask = mask_a20r_irq, 186 .irq_unmask = unmask_a20r_irq,
195 .mask_ack = mask_a20r_irq,
196 .unmask = unmask_a20r_irq,
197 .end = end_a20r_irq,
198}; 187};
199 188
200/* 189/*
@@ -220,7 +209,7 @@ void __init sni_a20r_irq_init(void)
220 int i; 209 int i;
221 210
222 for (i = SNI_A20R_IRQ_BASE + 2 ; i < SNI_A20R_IRQ_BASE + 8; i++) 211 for (i = SNI_A20R_IRQ_BASE + 2 ; i < SNI_A20R_IRQ_BASE + 8; i++)
223 set_irq_chip_and_handler(i, &a20r_irq_type, handle_level_irq); 212 irq_set_chip_and_handler(i, &a20r_irq_type, handle_level_irq);
224 sni_hwint = a20r_hwint; 213 sni_hwint = a20r_hwint;
225 change_c0_status(ST0_IM, IE_IRQ0); 214 change_c0_status(ST0_IM, IE_IRQ0);
226 setup_irq(SNI_A20R_IRQ_BASE + 3, &sni_isa_irq); 215 setup_irq(SNI_A20R_IRQ_BASE + 3, &sni_isa_irq);
diff --git a/arch/mips/sni/pcimt.c b/arch/mips/sni/pcimt.c
index 8c92c73bc71..ed3b3d31735 100644
--- a/arch/mips/sni/pcimt.c
+++ b/arch/mips/sni/pcimt.c
@@ -194,33 +194,24 @@ static struct pci_controller sni_controller = {
194 .io_map_base = SNI_PORT_BASE 194 .io_map_base = SNI_PORT_BASE
195}; 195};
196 196
197static void enable_pcimt_irq(unsigned int irq) 197static void enable_pcimt_irq(struct irq_data *d)
198{ 198{
199 unsigned int mask = 1 << (irq - PCIMT_IRQ_INT2); 199 unsigned int mask = 1 << (d->irq - PCIMT_IRQ_INT2);
200 200
201 *(volatile u8 *) PCIMT_IRQSEL |= mask; 201 *(volatile u8 *) PCIMT_IRQSEL |= mask;
202} 202}
203 203
204void disable_pcimt_irq(unsigned int irq) 204void disable_pcimt_irq(struct irq_data *d)
205{ 205{
206 unsigned int mask = ~(1 << (irq - PCIMT_IRQ_INT2)); 206 unsigned int mask = ~(1 << (d->irq - PCIMT_IRQ_INT2));
207 207
208 *(volatile u8 *) PCIMT_IRQSEL &= mask; 208 *(volatile u8 *) PCIMT_IRQSEL &= mask;
209} 209}
210 210
211static void end_pcimt_irq(unsigned int irq)
212{
213 if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
214 enable_pcimt_irq(irq);
215}
216
217static struct irq_chip pcimt_irq_type = { 211static struct irq_chip pcimt_irq_type = {
218 .name = "PCIMT", 212 .name = "PCIMT",
219 .ack = disable_pcimt_irq, 213 .irq_mask = disable_pcimt_irq,
220 .mask = disable_pcimt_irq, 214 .irq_unmask = enable_pcimt_irq,
221 .mask_ack = disable_pcimt_irq,
222 .unmask = enable_pcimt_irq,
223 .end = end_pcimt_irq,
224}; 215};
225 216
226/* 217/*
@@ -305,7 +296,7 @@ void __init sni_pcimt_irq_init(void)
305 mips_cpu_irq_init(); 296 mips_cpu_irq_init();
306 /* Actually we've got more interrupts to handle ... */ 297 /* Actually we've got more interrupts to handle ... */
307 for (i = PCIMT_IRQ_INT2; i <= PCIMT_IRQ_SCSI; i++) 298 for (i = PCIMT_IRQ_INT2; i <= PCIMT_IRQ_SCSI; i++)
308 set_irq_chip_and_handler(i, &pcimt_irq_type, handle_level_irq); 299 irq_set_chip_and_handler(i, &pcimt_irq_type, handle_level_irq);
309 sni_hwint = sni_pcimt_hwint; 300 sni_hwint = sni_pcimt_hwint;
310 change_c0_status(ST0_IM, IE_IRQ1|IE_IRQ3); 301 change_c0_status(ST0_IM, IE_IRQ1|IE_IRQ3);
311} 302}
diff --git a/arch/mips/sni/pcit.c b/arch/mips/sni/pcit.c
index dc9874553be..b5246373d16 100644
--- a/arch/mips/sni/pcit.c
+++ b/arch/mips/sni/pcit.c
@@ -156,33 +156,24 @@ static struct pci_controller sni_pcit_controller = {
156 .io_map_base = SNI_PORT_BASE 156 .io_map_base = SNI_PORT_BASE
157}; 157};
158 158
159static void enable_pcit_irq(unsigned int irq) 159static void enable_pcit_irq(struct irq_data *d)
160{ 160{
161 u32 mask = 1 << (irq - SNI_PCIT_INT_START + 24); 161 u32 mask = 1 << (d->irq - SNI_PCIT_INT_START + 24);
162 162
163 *(volatile u32 *)SNI_PCIT_INT_REG |= mask; 163 *(volatile u32 *)SNI_PCIT_INT_REG |= mask;
164} 164}
165 165
166void disable_pcit_irq(unsigned int irq) 166void disable_pcit_irq(struct irq_data *d)
167{ 167{
168 u32 mask = 1 << (irq - SNI_PCIT_INT_START + 24); 168 u32 mask = 1 << (d->irq - SNI_PCIT_INT_START + 24);
169 169
170 *(volatile u32 *)SNI_PCIT_INT_REG &= ~mask; 170 *(volatile u32 *)SNI_PCIT_INT_REG &= ~mask;
171} 171}
172 172
173void end_pcit_irq(unsigned int irq)
174{
175 if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
176 enable_pcit_irq(irq);
177}
178
179static struct irq_chip pcit_irq_type = { 173static struct irq_chip pcit_irq_type = {
180 .name = "PCIT", 174 .name = "PCIT",
181 .ack = disable_pcit_irq, 175 .irq_mask = disable_pcit_irq,
182 .mask = disable_pcit_irq, 176 .irq_unmask = enable_pcit_irq,
183 .mask_ack = disable_pcit_irq,
184 .unmask = enable_pcit_irq,
185 .end = end_pcit_irq,
186}; 177};
187 178
188static void pcit_hwint1(void) 179static void pcit_hwint1(void)
@@ -247,7 +238,7 @@ void __init sni_pcit_irq_init(void)
247 238
248 mips_cpu_irq_init(); 239 mips_cpu_irq_init();
249 for (i = SNI_PCIT_INT_START; i <= SNI_PCIT_INT_END; i++) 240 for (i = SNI_PCIT_INT_START; i <= SNI_PCIT_INT_END; i++)
250 set_irq_chip_and_handler(i, &pcit_irq_type, handle_level_irq); 241 irq_set_chip_and_handler(i, &pcit_irq_type, handle_level_irq);
251 *(volatile u32 *)SNI_PCIT_INT_REG = 0; 242 *(volatile u32 *)SNI_PCIT_INT_REG = 0;
252 sni_hwint = sni_pcit_hwint; 243 sni_hwint = sni_pcit_hwint;
253 change_c0_status(ST0_IM, IE_IRQ1); 244 change_c0_status(ST0_IM, IE_IRQ1);
@@ -260,7 +251,7 @@ void __init sni_pcit_cplus_irq_init(void)
260 251
261 mips_cpu_irq_init(); 252 mips_cpu_irq_init();
262 for (i = SNI_PCIT_INT_START; i <= SNI_PCIT_INT_END; i++) 253 for (i = SNI_PCIT_INT_START; i <= SNI_PCIT_INT_END; i++)
263 set_irq_chip_and_handler(i, &pcit_irq_type, handle_level_irq); 254 irq_set_chip_and_handler(i, &pcit_irq_type, handle_level_irq);
264 *(volatile u32 *)SNI_PCIT_INT_REG = 0x40000000; 255 *(volatile u32 *)SNI_PCIT_INT_REG = 0x40000000;
265 sni_hwint = sni_pcit_hwint_cplus; 256 sni_hwint = sni_pcit_hwint_cplus;
266 change_c0_status(ST0_IM, IE_IRQ0); 257 change_c0_status(ST0_IM, IE_IRQ0);
diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c
index 0e6f42c2bbc..a7e5a6d917b 100644
--- a/arch/mips/sni/rm200.c
+++ b/arch/mips/sni/rm200.c
@@ -155,12 +155,11 @@ static __iomem u8 *rm200_pic_slave;
155#define cached_master_mask (rm200_cached_irq_mask) 155#define cached_master_mask (rm200_cached_irq_mask)
156#define cached_slave_mask (rm200_cached_irq_mask >> 8) 156#define cached_slave_mask (rm200_cached_irq_mask >> 8)
157 157
158static void sni_rm200_disable_8259A_irq(unsigned int irq) 158static void sni_rm200_disable_8259A_irq(struct irq_data *d)
159{ 159{
160 unsigned int mask; 160 unsigned int mask, irq = d->irq - RM200_I8259A_IRQ_BASE;
161 unsigned long flags; 161 unsigned long flags;
162 162
163 irq -= RM200_I8259A_IRQ_BASE;
164 mask = 1 << irq; 163 mask = 1 << irq;
165 raw_spin_lock_irqsave(&sni_rm200_i8259A_lock, flags); 164 raw_spin_lock_irqsave(&sni_rm200_i8259A_lock, flags);
166 rm200_cached_irq_mask |= mask; 165 rm200_cached_irq_mask |= mask;
@@ -171,12 +170,11 @@ static void sni_rm200_disable_8259A_irq(unsigned int irq)
171 raw_spin_unlock_irqrestore(&sni_rm200_i8259A_lock, flags); 170 raw_spin_unlock_irqrestore(&sni_rm200_i8259A_lock, flags);
172} 171}
173 172
174static void sni_rm200_enable_8259A_irq(unsigned int irq) 173static void sni_rm200_enable_8259A_irq(struct irq_data *d)
175{ 174{
176 unsigned int mask; 175 unsigned int mask, irq = d->irq - RM200_I8259A_IRQ_BASE;
177 unsigned long flags; 176 unsigned long flags;
178 177
179 irq -= RM200_I8259A_IRQ_BASE;
180 mask = ~(1 << irq); 178 mask = ~(1 << irq);
181 raw_spin_lock_irqsave(&sni_rm200_i8259A_lock, flags); 179 raw_spin_lock_irqsave(&sni_rm200_i8259A_lock, flags);
182 rm200_cached_irq_mask &= mask; 180 rm200_cached_irq_mask &= mask;
@@ -210,12 +208,11 @@ static inline int sni_rm200_i8259A_irq_real(unsigned int irq)
210 * first, _then_ send the EOI, and the order of EOI 208 * first, _then_ send the EOI, and the order of EOI
211 * to the two 8259s is important! 209 * to the two 8259s is important!
212 */ 210 */
213void sni_rm200_mask_and_ack_8259A(unsigned int irq) 211void sni_rm200_mask_and_ack_8259A(struct irq_data *d)
214{ 212{
215 unsigned int irqmask; 213 unsigned int irqmask, irq = d->irq - RM200_I8259A_IRQ_BASE;
216 unsigned long flags; 214 unsigned long flags;
217 215
218 irq -= RM200_I8259A_IRQ_BASE;
219 irqmask = 1 << irq; 216 irqmask = 1 << irq;
220 raw_spin_lock_irqsave(&sni_rm200_i8259A_lock, flags); 217 raw_spin_lock_irqsave(&sni_rm200_i8259A_lock, flags);
221 /* 218 /*
@@ -285,9 +282,9 @@ spurious_8259A_irq:
285 282
286static struct irq_chip sni_rm200_i8259A_chip = { 283static struct irq_chip sni_rm200_i8259A_chip = {
287 .name = "RM200-XT-PIC", 284 .name = "RM200-XT-PIC",
288 .mask = sni_rm200_disable_8259A_irq, 285 .irq_mask = sni_rm200_disable_8259A_irq,
289 .unmask = sni_rm200_enable_8259A_irq, 286 .irq_unmask = sni_rm200_enable_8259A_irq,
290 .mask_ack = sni_rm200_mask_and_ack_8259A, 287 .irq_mask_ack = sni_rm200_mask_and_ack_8259A,
291}; 288};
292 289
293/* 290/*
@@ -416,7 +413,7 @@ void __init sni_rm200_i8259_irqs(void)
416 sni_rm200_init_8259A(); 413 sni_rm200_init_8259A();
417 414
418 for (i = RM200_I8259A_IRQ_BASE; i < RM200_I8259A_IRQ_BASE + 16; i++) 415 for (i = RM200_I8259A_IRQ_BASE; i < RM200_I8259A_IRQ_BASE + 16; i++)
419 set_irq_chip_and_handler(i, &sni_rm200_i8259A_chip, 416 irq_set_chip_and_handler(i, &sni_rm200_i8259A_chip,
420 handle_level_irq); 417 handle_level_irq);
421 418
422 setup_irq(RM200_I8259A_IRQ_BASE + PIC_CASCADE_IR, &sni_rm200_irq2); 419 setup_irq(RM200_I8259A_IRQ_BASE + PIC_CASCADE_IR, &sni_rm200_irq2);
@@ -429,33 +426,24 @@ void __init sni_rm200_i8259_irqs(void)
429#define SNI_RM200_INT_START 24 426#define SNI_RM200_INT_START 24
430#define SNI_RM200_INT_END 28 427#define SNI_RM200_INT_END 28
431 428
432static void enable_rm200_irq(unsigned int irq) 429static void enable_rm200_irq(struct irq_data *d)
433{ 430{
434 unsigned int mask = 1 << (irq - SNI_RM200_INT_START); 431 unsigned int mask = 1 << (d->irq - SNI_RM200_INT_START);
435 432
436 *(volatile u8 *)SNI_RM200_INT_ENA_REG &= ~mask; 433 *(volatile u8 *)SNI_RM200_INT_ENA_REG &= ~mask;
437} 434}
438 435
439void disable_rm200_irq(unsigned int irq) 436void disable_rm200_irq(struct irq_data *d)
440{ 437{
441 unsigned int mask = 1 << (irq - SNI_RM200_INT_START); 438 unsigned int mask = 1 << (d->irq - SNI_RM200_INT_START);
442 439
443 *(volatile u8 *)SNI_RM200_INT_ENA_REG |= mask; 440 *(volatile u8 *)SNI_RM200_INT_ENA_REG |= mask;
444} 441}
445 442
446void end_rm200_irq(unsigned int irq)
447{
448 if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
449 enable_rm200_irq(irq);
450}
451
452static struct irq_chip rm200_irq_type = { 443static struct irq_chip rm200_irq_type = {
453 .name = "RM200", 444 .name = "RM200",
454 .ack = disable_rm200_irq, 445 .irq_mask = disable_rm200_irq,
455 .mask = disable_rm200_irq, 446 .irq_unmask = enable_rm200_irq,
456 .mask_ack = disable_rm200_irq,
457 .unmask = enable_rm200_irq,
458 .end = end_rm200_irq,
459}; 447};
460 448
461static void sni_rm200_hwint(void) 449static void sni_rm200_hwint(void)
@@ -489,7 +477,7 @@ void __init sni_rm200_irq_init(void)
489 mips_cpu_irq_init(); 477 mips_cpu_irq_init();
490 /* Actually we've got more interrupts to handle ... */ 478 /* Actually we've got more interrupts to handle ... */
491 for (i = SNI_RM200_INT_START; i <= SNI_RM200_INT_END; i++) 479 for (i = SNI_RM200_INT_START; i <= SNI_RM200_INT_END; i++)
492 set_irq_chip_and_handler(i, &rm200_irq_type, handle_level_irq); 480 irq_set_chip_and_handler(i, &rm200_irq_type, handle_level_irq);
493 sni_hwint = sni_rm200_hwint; 481 sni_hwint = sni_rm200_hwint;
494 change_c0_status(ST0_IM, IE_IRQ0); 482 change_c0_status(ST0_IM, IE_IRQ0);
495 setup_irq(SNI_RM200_INT_START + 0, &sni_rm200_i8259A_irq); 483 setup_irq(SNI_RM200_INT_START + 0, &sni_rm200_i8259A_irq);
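The RM200 PIC keeps its cached-mask-under-spinlock scheme; the conversion only changes the entry points, folding the RM200_I8259A_IRQ_BASE subtraction into the irq_data access. A hedged sketch of that locked mask/unmask pattern — the IMR pointer, base and cache variable are placeholders:

/*
 * Sketch of a cached-mask PIC under the irq_data API.  mypic_imr and
 * MYPIC_IRQ_BASE are placeholders; the locking and mask caching mirror
 * the RM200 8259 handlers above.
 */
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/spinlock.h>

#define MYPIC_IRQ_BASE	160

static void __iomem *mypic_imr;			/* assumed mapped elsewhere */
static unsigned int mypic_cached_mask = 0xff;	/* start with all masked */
static DEFINE_RAW_SPINLOCK(mypic_lock);

static void mypic_mask_irq(struct irq_data *d)
{
	unsigned int bit = 1 << (d->irq - MYPIC_IRQ_BASE);
	unsigned long flags;

	raw_spin_lock_irqsave(&mypic_lock, flags);
	mypic_cached_mask |= bit;
	writeb(mypic_cached_mask, mypic_imr);
	raw_spin_unlock_irqrestore(&mypic_lock, flags);
}

static void mypic_unmask_irq(struct irq_data *d)
{
	unsigned int bit = 1 << (d->irq - MYPIC_IRQ_BASE);
	unsigned long flags;

	raw_spin_lock_irqsave(&mypic_lock, flags);
	mypic_cached_mask &= ~bit;
	writeb(mypic_cached_mask, mypic_imr);
	raw_spin_unlock_irqrestore(&mypic_lock, flags);
}

static struct irq_chip mypic_chip = {
	.name		= "MYPIC",
	.irq_mask	= mypic_mask_irq,
	.irq_unmask	= mypic_unmask_irq,
};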
diff --git a/arch/mips/txx9/generic/irq_tx4927.c b/arch/mips/txx9/generic/irq_tx4927.c
index e1828e8bcae..7e3ac5782da 100644
--- a/arch/mips/txx9/generic/irq_tx4927.c
+++ b/arch/mips/txx9/generic/irq_tx4927.c
@@ -35,7 +35,7 @@ void __init tx4927_irq_init(void)
35 35
36 mips_cpu_irq_init(); 36 mips_cpu_irq_init();
37 txx9_irq_init(TX4927_IRC_REG & 0xfffffffffULL); 37 txx9_irq_init(TX4927_IRC_REG & 0xfffffffffULL);
38 set_irq_chained_handler(MIPS_CPU_IRQ_BASE + TX4927_IRC_INT, 38 irq_set_chained_handler(MIPS_CPU_IRQ_BASE + TX4927_IRC_INT,
39 handle_simple_irq); 39 handle_simple_irq);
40 /* raise priority for errors, timers, SIO */ 40 /* raise priority for errors, timers, SIO */
41 txx9_irq_set_pri(TX4927_IR_ECCERR, 7); 41 txx9_irq_set_pri(TX4927_IR_ECCERR, 7);
diff --git a/arch/mips/txx9/generic/irq_tx4938.c b/arch/mips/txx9/generic/irq_tx4938.c
index a6e6e805097..aace8565332 100644
--- a/arch/mips/txx9/generic/irq_tx4938.c
+++ b/arch/mips/txx9/generic/irq_tx4938.c
@@ -23,7 +23,7 @@ void __init tx4938_irq_init(void)
23 23
24 mips_cpu_irq_init(); 24 mips_cpu_irq_init();
25 txx9_irq_init(TX4938_IRC_REG & 0xfffffffffULL); 25 txx9_irq_init(TX4938_IRC_REG & 0xfffffffffULL);
26 set_irq_chained_handler(MIPS_CPU_IRQ_BASE + TX4938_IRC_INT, 26 irq_set_chained_handler(MIPS_CPU_IRQ_BASE + TX4938_IRC_INT,
27 handle_simple_irq); 27 handle_simple_irq);
28 /* raise priority for errors, timers, SIO */ 28 /* raise priority for errors, timers, SIO */
29 txx9_irq_set_pri(TX4938_IR_ECCERR, 7); 29 txx9_irq_set_pri(TX4938_IR_ECCERR, 7);
diff --git a/arch/mips/txx9/generic/irq_tx4939.c b/arch/mips/txx9/generic/irq_tx4939.c
index 3886ad77cba..6b067dbd2ae 100644
--- a/arch/mips/txx9/generic/irq_tx4939.c
+++ b/arch/mips/txx9/generic/irq_tx4939.c
@@ -50,9 +50,9 @@ static struct {
50 unsigned char mode; 50 unsigned char mode;
51} tx4939irq[TX4939_NUM_IR] __read_mostly; 51} tx4939irq[TX4939_NUM_IR] __read_mostly;
52 52
53static void tx4939_irq_unmask(unsigned int irq) 53static void tx4939_irq_unmask(struct irq_data *d)
54{ 54{
55 unsigned int irq_nr = irq - TXX9_IRQ_BASE; 55 unsigned int irq_nr = d->irq - TXX9_IRQ_BASE;
56 u32 __iomem *lvlp; 56 u32 __iomem *lvlp;
57 int ofs; 57 int ofs;
58 if (irq_nr < 32) { 58 if (irq_nr < 32) {
@@ -68,9 +68,9 @@ static void tx4939_irq_unmask(unsigned int irq)
68 lvlp); 68 lvlp);
69} 69}
70 70
71static inline void tx4939_irq_mask(unsigned int irq) 71static inline void tx4939_irq_mask(struct irq_data *d)
72{ 72{
73 unsigned int irq_nr = irq - TXX9_IRQ_BASE; 73 unsigned int irq_nr = d->irq - TXX9_IRQ_BASE;
74 u32 __iomem *lvlp; 74 u32 __iomem *lvlp;
75 int ofs; 75 int ofs;
76 if (irq_nr < 32) { 76 if (irq_nr < 32) {
@@ -87,11 +87,11 @@ static inline void tx4939_irq_mask(unsigned int irq)
87 mmiowb(); 87 mmiowb();
88} 88}
89 89
90static void tx4939_irq_mask_ack(unsigned int irq) 90static void tx4939_irq_mask_ack(struct irq_data *d)
91{ 91{
92 unsigned int irq_nr = irq - TXX9_IRQ_BASE; 92 unsigned int irq_nr = d->irq - TXX9_IRQ_BASE;
93 93
94 tx4939_irq_mask(irq); 94 tx4939_irq_mask(d);
95 if (TXx9_IRCR_EDGE(tx4939irq[irq_nr].mode)) { 95 if (TXx9_IRCR_EDGE(tx4939irq[irq_nr].mode)) {
96 irq_nr--; 96 irq_nr--;
97 /* clear edge detection */ 97 /* clear edge detection */
@@ -101,9 +101,9 @@ static void tx4939_irq_mask_ack(unsigned int irq)
101 } 101 }
102} 102}
103 103
104static int tx4939_irq_set_type(unsigned int irq, unsigned int flow_type) 104static int tx4939_irq_set_type(struct irq_data *d, unsigned int flow_type)
105{ 105{
106 unsigned int irq_nr = irq - TXX9_IRQ_BASE; 106 unsigned int irq_nr = d->irq - TXX9_IRQ_BASE;
107 u32 cr; 107 u32 cr;
108 u32 __iomem *crp; 108 u32 __iomem *crp;
109 int ofs; 109 int ofs;
@@ -145,11 +145,11 @@ static int tx4939_irq_set_type(unsigned int irq, unsigned int flow_type)
145 145
146static struct irq_chip tx4939_irq_chip = { 146static struct irq_chip tx4939_irq_chip = {
147 .name = "TX4939", 147 .name = "TX4939",
148 .ack = tx4939_irq_mask_ack, 148 .irq_ack = tx4939_irq_mask_ack,
149 .mask = tx4939_irq_mask, 149 .irq_mask = tx4939_irq_mask,
150 .mask_ack = tx4939_irq_mask_ack, 150 .irq_mask_ack = tx4939_irq_mask_ack,
151 .unmask = tx4939_irq_unmask, 151 .irq_unmask = tx4939_irq_unmask,
152 .set_type = tx4939_irq_set_type, 152 .irq_set_type = tx4939_irq_set_type,
153}; 153};
154 154
155static int tx4939_irq_set_pri(int irc_irq, int new_pri) 155static int tx4939_irq_set_pri(int irc_irq, int new_pri)
@@ -176,8 +176,8 @@ void __init tx4939_irq_init(void)
176 for (i = 1; i < TX4939_NUM_IR; i++) { 176 for (i = 1; i < TX4939_NUM_IR; i++) {
177 tx4939irq[i].level = 4; /* middle level */ 177 tx4939irq[i].level = 4; /* middle level */
178 tx4939irq[i].mode = TXx9_IRCR_LOW; 178 tx4939irq[i].mode = TXx9_IRCR_LOW;
179 set_irq_chip_and_handler(TXX9_IRQ_BASE + i, 179 irq_set_chip_and_handler(TXX9_IRQ_BASE + i, &tx4939_irq_chip,
180 &tx4939_irq_chip, handle_level_irq); 180 handle_level_irq);
181 } 181 }
182 182
183 /* mask all IRC interrupts */ 183 /* mask all IRC interrupts */
@@ -193,7 +193,7 @@ void __init tx4939_irq_init(void)
193 __raw_writel(TXx9_IRCER_ICE, &tx4939_ircptr->den.r); 193 __raw_writel(TXx9_IRCER_ICE, &tx4939_ircptr->den.r);
194 __raw_writel(irc_elevel, &tx4939_ircptr->msk.r); 194 __raw_writel(irc_elevel, &tx4939_ircptr->msk.r);
195 195
196 set_irq_chained_handler(MIPS_CPU_IRQ_BASE + TX4939_IRC_INT, 196 irq_set_chained_handler(MIPS_CPU_IRQ_BASE + TX4939_IRC_INT,
197 handle_simple_irq); 197 handle_simple_irq);
198 198
199 /* raise priority for errors, timers, sio */ 199 /* raise priority for errors, timers, sio */
diff --git a/arch/mips/txx9/jmr3927/irq.c b/arch/mips/txx9/jmr3927/irq.c
index 0a7f8e3b9fd..c22c859a2c4 100644
--- a/arch/mips/txx9/jmr3927/irq.c
+++ b/arch/mips/txx9/jmr3927/irq.c
@@ -47,20 +47,20 @@
  * CP0_STATUS is a thread's resource (saved/restored on context switch).
  * So disable_irq/enable_irq MUST handle IOC/IRC registers.
  */
-static void mask_irq_ioc(unsigned int irq)
+static void mask_irq_ioc(struct irq_data *d)
 {
 	/* 0: mask */
-	unsigned int irq_nr = irq - JMR3927_IRQ_IOC;
+	unsigned int irq_nr = d->irq - JMR3927_IRQ_IOC;
 	unsigned char imask = jmr3927_ioc_reg_in(JMR3927_IOC_INTM_ADDR);
 	unsigned int bit = 1 << irq_nr;
 	jmr3927_ioc_reg_out(imask & ~bit, JMR3927_IOC_INTM_ADDR);
 	/* flush write buffer */
 	(void)jmr3927_ioc_reg_in(JMR3927_IOC_REV_ADDR);
 }
-static void unmask_irq_ioc(unsigned int irq)
+static void unmask_irq_ioc(struct irq_data *d)
 {
 	/* 0: mask */
-	unsigned int irq_nr = irq - JMR3927_IRQ_IOC;
+	unsigned int irq_nr = d->irq - JMR3927_IRQ_IOC;
 	unsigned char imask = jmr3927_ioc_reg_in(JMR3927_IOC_INTM_ADDR);
 	unsigned int bit = 1 << irq_nr;
 	jmr3927_ioc_reg_out(imask | bit, JMR3927_IOC_INTM_ADDR);
@@ -95,10 +95,8 @@ static int jmr3927_irq_dispatch(int pending)
 
 static struct irq_chip jmr3927_irq_ioc = {
 	.name = "jmr3927_ioc",
-	.ack = mask_irq_ioc,
-	.mask = mask_irq_ioc,
-	.mask_ack = mask_irq_ioc,
-	.unmask = unmask_irq_ioc,
+	.irq_mask = mask_irq_ioc,
+	.irq_unmask = unmask_irq_ioc,
 };
 
 void __init jmr3927_irq_setup(void)
@@ -122,8 +120,9 @@ void __init jmr3927_irq_setup(void)
 
 	tx3927_irq_init();
 	for (i = JMR3927_IRQ_IOC; i < JMR3927_IRQ_IOC + JMR3927_NR_IRQ_IOC; i++)
-		set_irq_chip_and_handler(i, &jmr3927_irq_ioc, handle_level_irq);
+		irq_set_chip_and_handler(i, &jmr3927_irq_ioc,
+					 handle_level_irq);
 
 	/* setup IOC interrupt 1 (PCI, MODEM) */
-	set_irq_chained_handler(JMR3927_IRQ_IOCINT, handle_simple_irq);
+	irq_set_chained_handler(JMR3927_IRQ_IOCINT, handle_simple_irq);
 }
diff --git a/arch/mips/txx9/rbtx4927/irq.c b/arch/mips/txx9/rbtx4927/irq.c
index c4b54d20efd..6c22c496090 100644
--- a/arch/mips/txx9/rbtx4927/irq.c
+++ b/arch/mips/txx9/rbtx4927/irq.c
@@ -117,18 +117,6 @@
 #include <asm/txx9/generic.h>
 #include <asm/txx9/rbtx4927.h>
 
-static void toshiba_rbtx4927_irq_ioc_enable(unsigned int irq);
-static void toshiba_rbtx4927_irq_ioc_disable(unsigned int irq);
-
-#define TOSHIBA_RBTX4927_IOC_NAME "RBTX4927-IOC"
-static struct irq_chip toshiba_rbtx4927_irq_ioc_type = {
-	.name = TOSHIBA_RBTX4927_IOC_NAME,
-	.ack = toshiba_rbtx4927_irq_ioc_disable,
-	.mask = toshiba_rbtx4927_irq_ioc_disable,
-	.mask_ack = toshiba_rbtx4927_irq_ioc_disable,
-	.unmask = toshiba_rbtx4927_irq_ioc_enable,
-};
-
 static int toshiba_rbtx4927_irq_nested(int sw_irq)
 {
 	u8 level3;
@@ -139,41 +127,47 @@ static int toshiba_rbtx4927_irq_nested(int sw_irq)
 	return RBTX4927_IRQ_IOC + __fls8(level3);
 }
 
-static void __init toshiba_rbtx4927_irq_ioc_init(void)
-{
-	int i;
-
-	/* mask all IOC interrupts */
-	writeb(0, rbtx4927_imask_addr);
-	/* clear SoftInt interrupts */
-	writeb(0, rbtx4927_softint_addr);
-
-	for (i = RBTX4927_IRQ_IOC;
-	     i < RBTX4927_IRQ_IOC + RBTX4927_NR_IRQ_IOC; i++)
-		set_irq_chip_and_handler(i, &toshiba_rbtx4927_irq_ioc_type,
-					 handle_level_irq);
-	set_irq_chained_handler(RBTX4927_IRQ_IOCINT, handle_simple_irq);
-}
-
-static void toshiba_rbtx4927_irq_ioc_enable(unsigned int irq)
+static void toshiba_rbtx4927_irq_ioc_enable(struct irq_data *d)
 {
 	unsigned char v;
 
 	v = readb(rbtx4927_imask_addr);
-	v |= (1 << (irq - RBTX4927_IRQ_IOC));
+	v |= (1 << (d->irq - RBTX4927_IRQ_IOC));
 	writeb(v, rbtx4927_imask_addr);
 }
 
-static void toshiba_rbtx4927_irq_ioc_disable(unsigned int irq)
+static void toshiba_rbtx4927_irq_ioc_disable(struct irq_data *d)
 {
 	unsigned char v;
 
 	v = readb(rbtx4927_imask_addr);
-	v &= ~(1 << (irq - RBTX4927_IRQ_IOC));
+	v &= ~(1 << (d->irq - RBTX4927_IRQ_IOC));
 	writeb(v, rbtx4927_imask_addr);
 	mmiowb();
 }
 
+#define TOSHIBA_RBTX4927_IOC_NAME "RBTX4927-IOC"
+static struct irq_chip toshiba_rbtx4927_irq_ioc_type = {
+	.name = TOSHIBA_RBTX4927_IOC_NAME,
+	.irq_mask = toshiba_rbtx4927_irq_ioc_disable,
+	.irq_unmask = toshiba_rbtx4927_irq_ioc_enable,
+};
+
+static void __init toshiba_rbtx4927_irq_ioc_init(void)
+{
+	int i;
+
+	/* mask all IOC interrupts */
+	writeb(0, rbtx4927_imask_addr);
+	/* clear SoftInt interrupts */
+	writeb(0, rbtx4927_softint_addr);
+
+	for (i = RBTX4927_IRQ_IOC;
+	     i < RBTX4927_IRQ_IOC + RBTX4927_NR_IRQ_IOC; i++)
+		irq_set_chip_and_handler(i, &toshiba_rbtx4927_irq_ioc_type,
+					 handle_level_irq);
+	irq_set_chained_handler(RBTX4927_IRQ_IOCINT, handle_simple_irq);
+}
 
 static int rbtx4927_irq_dispatch(int pending)
 {
@@ -200,5 +194,5 @@ void __init rbtx4927_irq_setup(void)
 	tx4927_irq_init();
 	toshiba_rbtx4927_irq_ioc_init();
 	/* Onboard 10M Ether: High Active */
-	set_irq_type(RBTX4927_RTL_8019_IRQ, IRQF_TRIGGER_HIGH);
+	irq_set_irq_type(RBTX4927_RTL_8019_IRQ, IRQF_TRIGGER_HIGH);
 }
diff --git a/arch/mips/txx9/rbtx4938/irq.c b/arch/mips/txx9/rbtx4938/irq.c
index 67a73a8065e..58cd7a9272c 100644
--- a/arch/mips/txx9/rbtx4938/irq.c
+++ b/arch/mips/txx9/rbtx4938/irq.c
@@ -69,18 +69,6 @@
 #include <asm/txx9/generic.h>
 #include <asm/txx9/rbtx4938.h>
 
-static void toshiba_rbtx4938_irq_ioc_enable(unsigned int irq);
-static void toshiba_rbtx4938_irq_ioc_disable(unsigned int irq);
-
-#define TOSHIBA_RBTX4938_IOC_NAME "RBTX4938-IOC"
-static struct irq_chip toshiba_rbtx4938_irq_ioc_type = {
-	.name = TOSHIBA_RBTX4938_IOC_NAME,
-	.ack = toshiba_rbtx4938_irq_ioc_disable,
-	.mask = toshiba_rbtx4938_irq_ioc_disable,
-	.mask_ack = toshiba_rbtx4938_irq_ioc_disable,
-	.unmask = toshiba_rbtx4938_irq_ioc_enable,
-};
-
 static int toshiba_rbtx4938_irq_nested(int sw_irq)
 {
 	u8 level3;
@@ -92,41 +80,33 @@ static int toshiba_rbtx4938_irq_nested(int sw_irq)
 	return RBTX4938_IRQ_IOC + __fls8(level3);
 }
 
-static void __init
-toshiba_rbtx4938_irq_ioc_init(void)
-{
-	int i;
-
-	for (i = RBTX4938_IRQ_IOC;
-	     i < RBTX4938_IRQ_IOC + RBTX4938_NR_IRQ_IOC; i++)
-		set_irq_chip_and_handler(i, &toshiba_rbtx4938_irq_ioc_type,
-					 handle_level_irq);
-
-	set_irq_chained_handler(RBTX4938_IRQ_IOCINT, handle_simple_irq);
-}
-
-static void
-toshiba_rbtx4938_irq_ioc_enable(unsigned int irq)
+static void toshiba_rbtx4938_irq_ioc_enable(struct irq_data *d)
 {
 	unsigned char v;
 
 	v = readb(rbtx4938_imask_addr);
-	v |= (1 << (irq - RBTX4938_IRQ_IOC));
+	v |= (1 << (d->irq - RBTX4938_IRQ_IOC));
 	writeb(v, rbtx4938_imask_addr);
 	mmiowb();
 }
 
-static void
-toshiba_rbtx4938_irq_ioc_disable(unsigned int irq)
+static void toshiba_rbtx4938_irq_ioc_disable(struct irq_data *d)
 {
 	unsigned char v;
 
 	v = readb(rbtx4938_imask_addr);
-	v &= ~(1 << (irq - RBTX4938_IRQ_IOC));
+	v &= ~(1 << (d->irq - RBTX4938_IRQ_IOC));
 	writeb(v, rbtx4938_imask_addr);
 	mmiowb();
 }
 
+#define TOSHIBA_RBTX4938_IOC_NAME "RBTX4938-IOC"
+static struct irq_chip toshiba_rbtx4938_irq_ioc_type = {
+	.name = TOSHIBA_RBTX4938_IOC_NAME,
+	.irq_mask = toshiba_rbtx4938_irq_ioc_disable,
+	.irq_unmask = toshiba_rbtx4938_irq_ioc_enable,
+};
+
 static int rbtx4938_irq_dispatch(int pending)
 {
 	int irq;
@@ -146,6 +126,18 @@ static int rbtx4938_irq_dispatch(int pending)
 	return irq;
 }
 
+static void __init toshiba_rbtx4938_irq_ioc_init(void)
+{
+	int i;
+
+	for (i = RBTX4938_IRQ_IOC;
+	     i < RBTX4938_IRQ_IOC + RBTX4938_NR_IRQ_IOC; i++)
+		irq_set_chip_and_handler(i, &toshiba_rbtx4938_irq_ioc_type,
+					 handle_level_irq);
+
+	irq_set_chained_handler(RBTX4938_IRQ_IOCINT, handle_simple_irq);
+}
+
 void __init rbtx4938_irq_setup(void)
 {
 	txx9_irq_dispatch = rbtx4938_irq_dispatch;
@@ -161,5 +153,5 @@ void __init rbtx4938_irq_setup(void)
 	tx4938_irq_init();
 	toshiba_rbtx4938_irq_ioc_init();
 	/* Onboard 10M Ether: High Active */
-	set_irq_type(RBTX4938_IRQ_ETHER, IRQF_TRIGGER_HIGH);
+	irq_set_irq_type(RBTX4938_IRQ_ETHER, IRQF_TRIGGER_HIGH);
 }
diff --git a/arch/mips/txx9/rbtx4939/irq.c b/arch/mips/txx9/rbtx4939/irq.c
index 57fa740a720..69a80616f0c 100644
--- a/arch/mips/txx9/rbtx4939/irq.c
+++ b/arch/mips/txx9/rbtx4939/irq.c
@@ -19,16 +19,16 @@
  * RBTX4939 IOC controller definition
  */
 
-static void rbtx4939_ioc_irq_unmask(unsigned int irq)
+static void rbtx4939_ioc_irq_unmask(struct irq_data *d)
 {
-	int ioc_nr = irq - RBTX4939_IRQ_IOC;
+	int ioc_nr = d->irq - RBTX4939_IRQ_IOC;
 
 	writeb(readb(rbtx4939_ien_addr) | (1 << ioc_nr), rbtx4939_ien_addr);
 }
 
-static void rbtx4939_ioc_irq_mask(unsigned int irq)
+static void rbtx4939_ioc_irq_mask(struct irq_data *d)
 {
-	int ioc_nr = irq - RBTX4939_IRQ_IOC;
+	int ioc_nr = d->irq - RBTX4939_IRQ_IOC;
 
 	writeb(readb(rbtx4939_ien_addr) & ~(1 << ioc_nr), rbtx4939_ien_addr);
 	mmiowb();
@@ -36,10 +36,8 @@ static void rbtx4939_ioc_irq_mask(unsigned int irq)
 
 static struct irq_chip rbtx4939_ioc_irq_chip = {
 	.name = "IOC",
-	.ack = rbtx4939_ioc_irq_mask,
-	.mask = rbtx4939_ioc_irq_mask,
-	.mask_ack = rbtx4939_ioc_irq_mask,
-	.unmask = rbtx4939_ioc_irq_unmask,
+	.irq_mask = rbtx4939_ioc_irq_mask,
+	.irq_unmask = rbtx4939_ioc_irq_unmask,
 };
 
 
@@ -90,8 +88,8 @@ void __init rbtx4939_irq_setup(void)
 	tx4939_irq_init();
 	for (i = RBTX4939_IRQ_IOC;
 	     i < RBTX4939_IRQ_IOC + RBTX4939_NR_IRQ_IOC; i++)
-		set_irq_chip_and_handler(i, &rbtx4939_ioc_irq_chip,
+		irq_set_chip_and_handler(i, &rbtx4939_ioc_irq_chip,
 					 handle_level_irq);
 
-	set_irq_chained_handler(RBTX4939_IRQ_IOCINT, handle_simple_irq);
+	irq_set_chained_handler(RBTX4939_IRQ_IOCINT, handle_simple_irq);
 }
diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
index 6153b6a05cc..a39ef3207d7 100644
--- a/arch/mips/vr41xx/common/icu.c
+++ b/arch/mips/vr41xx/common/icu.c
@@ -154,7 +154,7 @@ static inline uint16_t icu2_clear(uint8_t offset, uint16_t clear)
 
 void vr41xx_enable_piuint(uint16_t mask)
 {
-	struct irq_desc *desc = irq_desc + PIU_IRQ;
+	struct irq_desc *desc = irq_to_desc(PIU_IRQ);
 	unsigned long flags;
 
 	if (current_cpu_type() == CPU_VR4111 ||
@@ -169,7 +169,7 @@ EXPORT_SYMBOL(vr41xx_enable_piuint);
 
 void vr41xx_disable_piuint(uint16_t mask)
 {
-	struct irq_desc *desc = irq_desc + PIU_IRQ;
+	struct irq_desc *desc = irq_to_desc(PIU_IRQ);
 	unsigned long flags;
 
 	if (current_cpu_type() == CPU_VR4111 ||
@@ -184,7 +184,7 @@ EXPORT_SYMBOL(vr41xx_disable_piuint);
 
 void vr41xx_enable_aiuint(uint16_t mask)
 {
-	struct irq_desc *desc = irq_desc + AIU_IRQ;
+	struct irq_desc *desc = irq_to_desc(AIU_IRQ);
 	unsigned long flags;
 
 	if (current_cpu_type() == CPU_VR4111 ||
@@ -199,7 +199,7 @@ EXPORT_SYMBOL(vr41xx_enable_aiuint);
 
 void vr41xx_disable_aiuint(uint16_t mask)
 {
-	struct irq_desc *desc = irq_desc + AIU_IRQ;
+	struct irq_desc *desc = irq_to_desc(AIU_IRQ);
 	unsigned long flags;
 
 	if (current_cpu_type() == CPU_VR4111 ||
@@ -214,7 +214,7 @@ EXPORT_SYMBOL(vr41xx_disable_aiuint);
 
 void vr41xx_enable_kiuint(uint16_t mask)
 {
-	struct irq_desc *desc = irq_desc + KIU_IRQ;
+	struct irq_desc *desc = irq_to_desc(KIU_IRQ);
 	unsigned long flags;
 
 	if (current_cpu_type() == CPU_VR4111 ||
@@ -229,7 +229,7 @@ EXPORT_SYMBOL(vr41xx_enable_kiuint);
 
 void vr41xx_disable_kiuint(uint16_t mask)
 {
-	struct irq_desc *desc = irq_desc + KIU_IRQ;
+	struct irq_desc *desc = irq_to_desc(KIU_IRQ);
 	unsigned long flags;
 
 	if (current_cpu_type() == CPU_VR4111 ||
@@ -244,7 +244,7 @@ EXPORT_SYMBOL(vr41xx_disable_kiuint);
 
 void vr41xx_enable_macint(uint16_t mask)
 {
-	struct irq_desc *desc = irq_desc + ETHERNET_IRQ;
+	struct irq_desc *desc = irq_to_desc(ETHERNET_IRQ);
 	unsigned long flags;
 
 	raw_spin_lock_irqsave(&desc->lock, flags);
@@ -256,7 +256,7 @@ EXPORT_SYMBOL(vr41xx_enable_macint);
 
 void vr41xx_disable_macint(uint16_t mask)
 {
-	struct irq_desc *desc = irq_desc + ETHERNET_IRQ;
+	struct irq_desc *desc = irq_to_desc(ETHERNET_IRQ);
 	unsigned long flags;
 
 	raw_spin_lock_irqsave(&desc->lock, flags);
@@ -268,7 +268,7 @@ EXPORT_SYMBOL(vr41xx_disable_macint);
 
 void vr41xx_enable_dsiuint(uint16_t mask)
 {
-	struct irq_desc *desc = irq_desc + DSIU_IRQ;
+	struct irq_desc *desc = irq_to_desc(DSIU_IRQ);
 	unsigned long flags;
 
 	raw_spin_lock_irqsave(&desc->lock, flags);
@@ -280,7 +280,7 @@ EXPORT_SYMBOL(vr41xx_enable_dsiuint);
 
 void vr41xx_disable_dsiuint(uint16_t mask)
 {
-	struct irq_desc *desc = irq_desc + DSIU_IRQ;
+	struct irq_desc *desc = irq_to_desc(DSIU_IRQ);
 	unsigned long flags;
 
 	raw_spin_lock_irqsave(&desc->lock, flags);
@@ -292,7 +292,7 @@ EXPORT_SYMBOL(vr41xx_disable_dsiuint);
 
 void vr41xx_enable_firint(uint16_t mask)
 {
-	struct irq_desc *desc = irq_desc + FIR_IRQ;
+	struct irq_desc *desc = irq_to_desc(FIR_IRQ);
 	unsigned long flags;
 
 	raw_spin_lock_irqsave(&desc->lock, flags);
@@ -304,7 +304,7 @@ EXPORT_SYMBOL(vr41xx_enable_firint);
 
 void vr41xx_disable_firint(uint16_t mask)
 {
-	struct irq_desc *desc = irq_desc + FIR_IRQ;
+	struct irq_desc *desc = irq_to_desc(FIR_IRQ);
 	unsigned long flags;
 
 	raw_spin_lock_irqsave(&desc->lock, flags);
@@ -316,7 +316,7 @@ EXPORT_SYMBOL(vr41xx_disable_firint);
 
 void vr41xx_enable_pciint(void)
 {
-	struct irq_desc *desc = irq_desc + PCI_IRQ;
+	struct irq_desc *desc = irq_to_desc(PCI_IRQ);
 	unsigned long flags;
 
 	if (current_cpu_type() == CPU_VR4122 ||
@@ -332,7 +332,7 @@ EXPORT_SYMBOL(vr41xx_enable_pciint);
 
 void vr41xx_disable_pciint(void)
 {
-	struct irq_desc *desc = irq_desc + PCI_IRQ;
+	struct irq_desc *desc = irq_to_desc(PCI_IRQ);
 	unsigned long flags;
 
 	if (current_cpu_type() == CPU_VR4122 ||
@@ -348,7 +348,7 @@ EXPORT_SYMBOL(vr41xx_disable_pciint);
 
 void vr41xx_enable_scuint(void)
 {
-	struct irq_desc *desc = irq_desc + SCU_IRQ;
+	struct irq_desc *desc = irq_to_desc(SCU_IRQ);
 	unsigned long flags;
 
 	if (current_cpu_type() == CPU_VR4122 ||
@@ -364,7 +364,7 @@ EXPORT_SYMBOL(vr41xx_enable_scuint);
 
 void vr41xx_disable_scuint(void)
 {
-	struct irq_desc *desc = irq_desc + SCU_IRQ;
+	struct irq_desc *desc = irq_to_desc(SCU_IRQ);
 	unsigned long flags;
 
 	if (current_cpu_type() == CPU_VR4122 ||
@@ -380,7 +380,7 @@ EXPORT_SYMBOL(vr41xx_disable_scuint);
 
 void vr41xx_enable_csiint(uint16_t mask)
 {
-	struct irq_desc *desc = irq_desc + CSI_IRQ;
+	struct irq_desc *desc = irq_to_desc(CSI_IRQ);
 	unsigned long flags;
 
 	if (current_cpu_type() == CPU_VR4122 ||
@@ -396,7 +396,7 @@ EXPORT_SYMBOL(vr41xx_enable_csiint);
 
 void vr41xx_disable_csiint(uint16_t mask)
 {
-	struct irq_desc *desc = irq_desc + CSI_IRQ;
+	struct irq_desc *desc = irq_to_desc(CSI_IRQ);
 	unsigned long flags;
 
 	if (current_cpu_type() == CPU_VR4122 ||
@@ -412,7 +412,7 @@ EXPORT_SYMBOL(vr41xx_disable_csiint);
 
 void vr41xx_enable_bcuint(void)
 {
-	struct irq_desc *desc = irq_desc + BCU_IRQ;
+	struct irq_desc *desc = irq_to_desc(BCU_IRQ);
 	unsigned long flags;
 
 	if (current_cpu_type() == CPU_VR4122 ||
@@ -428,7 +428,7 @@ EXPORT_SYMBOL(vr41xx_enable_bcuint);
 
 void vr41xx_disable_bcuint(void)
 {
-	struct irq_desc *desc = irq_desc + BCU_IRQ;
+	struct irq_desc *desc = irq_to_desc(BCU_IRQ);
 	unsigned long flags;
 
 	if (current_cpu_type() == CPU_VR4122 ||
@@ -442,45 +442,41 @@ void vr41xx_disable_bcuint(void)
 
 EXPORT_SYMBOL(vr41xx_disable_bcuint);
 
-static void disable_sysint1_irq(unsigned int irq)
+static void disable_sysint1_irq(struct irq_data *d)
 {
-	icu1_clear(MSYSINT1REG, 1 << SYSINT1_IRQ_TO_PIN(irq));
+	icu1_clear(MSYSINT1REG, 1 << SYSINT1_IRQ_TO_PIN(d->irq));
 }
 
-static void enable_sysint1_irq(unsigned int irq)
+static void enable_sysint1_irq(struct irq_data *d)
 {
-	icu1_set(MSYSINT1REG, 1 << SYSINT1_IRQ_TO_PIN(irq));
+	icu1_set(MSYSINT1REG, 1 << SYSINT1_IRQ_TO_PIN(d->irq));
 }
 
 static struct irq_chip sysint1_irq_type = {
 	.name = "SYSINT1",
-	.ack = disable_sysint1_irq,
-	.mask = disable_sysint1_irq,
-	.mask_ack = disable_sysint1_irq,
-	.unmask = enable_sysint1_irq,
+	.irq_mask = disable_sysint1_irq,
+	.irq_unmask = enable_sysint1_irq,
 };
 
-static void disable_sysint2_irq(unsigned int irq)
+static void disable_sysint2_irq(struct irq_data *d)
 {
-	icu2_clear(MSYSINT2REG, 1 << SYSINT2_IRQ_TO_PIN(irq));
+	icu2_clear(MSYSINT2REG, 1 << SYSINT2_IRQ_TO_PIN(d->irq));
 }
 
-static void enable_sysint2_irq(unsigned int irq)
+static void enable_sysint2_irq(struct irq_data *d)
 {
-	icu2_set(MSYSINT2REG, 1 << SYSINT2_IRQ_TO_PIN(irq));
+	icu2_set(MSYSINT2REG, 1 << SYSINT2_IRQ_TO_PIN(d->irq));
 }
 
 static struct irq_chip sysint2_irq_type = {
 	.name = "SYSINT2",
-	.ack = disable_sysint2_irq,
-	.mask = disable_sysint2_irq,
-	.mask_ack = disable_sysint2_irq,
-	.unmask = enable_sysint2_irq,
+	.irq_mask = disable_sysint2_irq,
+	.irq_unmask = enable_sysint2_irq,
 };
 
 static inline int set_sysint1_assign(unsigned int irq, unsigned char assign)
 {
-	struct irq_desc *desc = irq_desc + irq;
+	struct irq_desc *desc = irq_to_desc(irq);
 	uint16_t intassign0, intassign1;
 	unsigned int pin;
 
@@ -540,7 +536,7 @@ static inline int set_sysint1_assign(unsigned int irq, unsigned char assign)
 
 static inline int set_sysint2_assign(unsigned int irq, unsigned char assign)
 {
-	struct irq_desc *desc = irq_desc + irq;
+	struct irq_desc *desc = irq_to_desc(irq);
 	uint16_t intassign2, intassign3;
 	unsigned int pin;
 
@@ -714,11 +710,11 @@ static int __init vr41xx_icu_init(void)
 	icu2_write(MGIUINTHREG, 0xffff);
 
 	for (i = SYSINT1_IRQ_BASE; i <= SYSINT1_IRQ_LAST; i++)
-		set_irq_chip_and_handler(i, &sysint1_irq_type,
+		irq_set_chip_and_handler(i, &sysint1_irq_type,
 					 handle_level_irq);
 
 	for (i = SYSINT2_IRQ_BASE; i <= SYSINT2_IRQ_LAST; i++)
-		set_irq_chip_and_handler(i, &sysint2_irq_type,
+		irq_set_chip_and_handler(i, &sysint2_irq_type,
 					 handle_level_irq);
 
 	cascade_irq(INT0_IRQ, icu_get_irq);
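The icu.c hunks above replace open-coded "irq_desc + NNN" pointer arithmetic with the irq_to_desc() accessor while keeping the same locking on the descriptor. A minimal sketch of that accessor pattern; EXAMPLE_IRQ and example_update_icu() are hypothetical names, not part of this patch:

/* Sketch only: descriptor lookup via irq_to_desc() instead of array math. */
#include <linux/irq.h>
#include <linux/irqdesc.h>

#define EXAMPLE_IRQ	40	/* hypothetical IRQ number */

static void example_update_icu(void)
{
	/* was: struct irq_desc *desc = irq_desc + EXAMPLE_IRQ; */
	struct irq_desc *desc = irq_to_desc(EXAMPLE_IRQ);
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);
	/* update the interrupt controller registers under the descriptor lock */
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}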
diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c
index 0975eb72d38..70a3b85f375 100644
--- a/arch/mips/vr41xx/common/irq.c
+++ b/arch/mips/vr41xx/common/irq.c
@@ -62,7 +62,6 @@ EXPORT_SYMBOL_GPL(cascade_irq);
 static void irq_dispatch(unsigned int irq)
 {
 	irq_cascade_t *cascade;
-	struct irq_desc *desc;
 
 	if (irq >= NR_IRQS) {
 		atomic_inc(&irq_err_count);
@@ -71,14 +70,16 @@ static void irq_dispatch(unsigned int irq)
 
 	cascade = irq_cascade + irq;
 	if (cascade->get_irq != NULL) {
-		unsigned int source_irq = irq;
+		struct irq_desc *desc = irq_to_desc(irq);
+		struct irq_data *idata = irq_desc_get_irq_data(desc);
+		struct irq_chip *chip = irq_desc_get_chip(desc);
 		int ret;
-		desc = irq_desc + source_irq;
-		if (desc->chip->mask_ack)
-			desc->chip->mask_ack(source_irq);
+
+		if (chip->irq_mask_ack)
+			chip->irq_mask_ack(idata);
 		else {
-			desc->chip->mask(source_irq);
-			desc->chip->ack(source_irq);
+			chip->irq_mask(idata);
+			chip->irq_ack(idata);
 		}
 		ret = cascade->get_irq(irq);
 		irq = ret;
@@ -86,8 +87,8 @@ static void irq_dispatch(unsigned int irq)
 			atomic_inc(&irq_err_count);
 		else
 			irq_dispatch(irq);
-		if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
-			desc->chip->unmask(source_irq);
+		if (!irqd_irq_disabled(idata) && chip->irq_unmask)
+			chip->irq_unmask(idata);
 	} else
 		do_IRQ(irq);
 }
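The final irq.c hunk fetches the chip and irq_data through the irq_desc_get_chip()/irq_desc_get_irq_data() accessors instead of dereferencing desc->chip, and tests the line state with irqd_irq_disabled() rather than desc->status. A condensed sketch of that mask/ack, demultiplex, unmask sequence; example_cascade() and the example_demux callback are hypothetical, not part of this patch:

/* Sketch only: cascade handling with the irq_data-based accessors. */
#include <linux/irq.h>
#include <linux/irqdesc.h>

static void example_cascade(unsigned int parent_irq,
			    int (*example_demux)(unsigned int))
{
	struct irq_desc *desc = irq_to_desc(parent_irq);
	struct irq_data *idata = irq_desc_get_irq_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	int child;

	/* Mask and acknowledge the parent line... */
	if (chip->irq_mask_ack)
		chip->irq_mask_ack(idata);
	else {
		chip->irq_mask(idata);
		if (chip->irq_ack)
			chip->irq_ack(idata);
	}

	/* ...demultiplex and handle the child interrupt... */
	child = example_demux(parent_irq);
	if (child >= 0)
		generic_handle_irq(child);

	/* ...then unmask, unless the line was disabled in the meantime. */
	if (!irqd_irq_disabled(idata) && chip->irq_unmask)
		chip->irq_unmask(idata);
}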