aboutsummaryrefslogtreecommitdiffstats
path: root/arch
diff options
context:
space:
mode:
Diffstat (limited to 'arch')
-rw-r--r--arch/alpha/Kconfig1
-rw-r--r--arch/alpha/include/asm/8253pit.h3
-rw-r--r--arch/alpha/kernel/perf_event.c2
-rw-r--r--arch/alpha/kernel/sys_ruffian.c1
-rw-r--r--arch/alpha/kernel/time.c3
-rw-r--r--arch/arm/Kconfig1
-rw-r--r--arch/arm/common/dmabounce.c2
-rw-r--r--arch/arm/configs/mmp2_defconfig9
-rw-r--r--arch/arm/include/asm/i8253.h15
-rw-r--r--arch/arm/kernel/perf_event.c6
-rw-r--r--arch/arm/kernel/perf_event_v6.c30
-rw-r--r--arch/arm/kernel/perf_event_v7.c30
-rw-r--r--arch/arm/kernel/perf_event_xscale.c18
-rw-r--r--arch/arm/kernel/ptrace.c5
-rw-r--r--arch/arm/kernel/setup.c2
-rw-r--r--arch/arm/kernel/smp_twd.c2
-rw-r--r--arch/arm/kernel/swp_emulate.c2
-rw-r--r--arch/arm/mach-at91/include/mach/at91_mci.h115
-rw-r--r--arch/arm/mach-davinci/board-dm365-evm.c4
-rw-r--r--arch/arm/mach-davinci/board-mityomapl138.c1
-rw-r--r--arch/arm/mach-davinci/dm646x.c1
-rw-r--r--arch/arm/mach-davinci/gpio.c21
-rw-r--r--arch/arm/mach-davinci/irq.c8
-rw-r--r--arch/arm/mach-davinci/pm.c1
-rw-r--r--arch/arm/mach-ep93xx/Makefile4
-rw-r--r--arch/arm/mach-ep93xx/core.c37
-rw-r--r--arch/arm/mach-ep93xx/dma-m2p.c411
-rw-r--r--arch/arm/mach-ep93xx/dma.c108
-rw-r--r--arch/arm/mach-ep93xx/gpio.c410
-rw-r--r--arch/arm/mach-ep93xx/include/mach/dma.h190
-rw-r--r--arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h1
-rw-r--r--arch/arm/mach-ep93xx/include/mach/ep93xx_spi.h2
-rw-r--r--arch/arm/mach-exynos4/cpu.c6
-rw-r--r--arch/arm/mach-exynos4/dev-audio.c2
-rw-r--r--arch/arm/mach-exynos4/headsmp.S2
-rw-r--r--arch/arm/mach-exynos4/mach-smdkv310.c8
-rw-r--r--arch/arm/mach-footbridge/Kconfig1
-rw-r--r--arch/arm/mach-footbridge/isa-timer.c59
-rw-r--r--arch/arm/mach-imx/clock-imx25.c7
-rw-r--r--arch/arm/mach-imx/dma-v1.c1
-rw-r--r--arch/arm/mach-imx/mach-apf9328.c2
-rw-r--r--arch/arm/mach-imx/mach-armadillo5x0.c2
-rw-r--r--arch/arm/mach-imx/mach-bug.c2
-rw-r--r--arch/arm/mach-imx/mach-cpuimx27.c2
-rw-r--r--arch/arm/mach-imx/mach-cpuimx35.c2
-rw-r--r--arch/arm/mach-imx/mach-eukrea_cpuimx25.c2
-rw-r--r--arch/arm/mach-imx/mach-imx27_visstrim_m10.c2
-rw-r--r--arch/arm/mach-imx/mach-imx27ipcam.c2
-rw-r--r--arch/arm/mach-imx/mach-imx27lite.c2
-rw-r--r--arch/arm/mach-imx/mach-kzm_arm11_01.c2
-rw-r--r--arch/arm/mach-imx/mach-mx1ads.c2
-rw-r--r--arch/arm/mach-imx/mach-mx21ads.c2
-rw-r--r--arch/arm/mach-imx/mach-mx25_3ds.c2
-rw-r--r--arch/arm/mach-imx/mach-mx27_3ds.c2
-rw-r--r--arch/arm/mach-imx/mach-mx27ads.c2
-rw-r--r--arch/arm/mach-imx/mach-mx31_3ds.c3
-rw-r--r--arch/arm/mach-imx/mach-mx31ads.c2
-rw-r--r--arch/arm/mach-imx/mach-mx31lilly.c2
-rw-r--r--arch/arm/mach-imx/mach-mx31lite.c2
-rw-r--r--arch/arm/mach-imx/mach-mx31moboard.c2
-rw-r--r--arch/arm/mach-imx/mach-mx35_3ds.c2
-rw-r--r--arch/arm/mach-imx/mach-mxt_td60.c2
-rw-r--r--arch/arm/mach-imx/mach-pca100.c2
-rw-r--r--arch/arm/mach-imx/mach-pcm037.c2
-rw-r--r--arch/arm/mach-imx/mach-pcm038.c2
-rw-r--r--arch/arm/mach-imx/mach-pcm043.c2
-rw-r--r--arch/arm/mach-imx/mach-qong.c2
-rw-r--r--arch/arm/mach-imx/mach-scb9328.c2
-rw-r--r--arch/arm/mach-imx/mach-vpr200.c2
-rw-r--r--arch/arm/mach-imx/mm-imx1.c21
-rw-r--r--arch/arm/mach-imx/mm-imx21.c21
-rw-r--r--arch/arm/mach-imx/mm-imx25.c17
-rw-r--r--arch/arm/mach-imx/mm-imx27.c22
-rw-r--r--arch/arm/mach-imx/mm-imx31.c15
-rw-r--r--arch/arm/mach-imx/mm-imx35.c16
-rw-r--r--arch/arm/mach-iop13xx/setup.c1
-rw-r--r--arch/arm/mach-ixp4xx/common.c10
-rw-r--r--arch/arm/mach-mmp/brownstone.c10
-rw-r--r--arch/arm/mach-mmp/include/mach/mmp2.h2
-rw-r--r--arch/arm/mach-mmp/jasper.c2
-rw-r--r--arch/arm/mach-mmp/mmp2.c16
-rw-r--r--arch/arm/mach-mmp/pxa168.c2
-rw-r--r--arch/arm/mach-mmp/pxa910.c2
-rw-r--r--arch/arm/mach-msm/Kconfig19
-rw-r--r--arch/arm/mach-msm/Makefile2
-rw-r--r--arch/arm/mach-msm/iommu.c731
-rw-r--r--arch/arm/mach-msm/iommu_dev.c422
-rw-r--r--arch/arm/mach-mx5/board-cpuimx51.c2
-rw-r--r--arch/arm/mach-mx5/board-cpuimx51sd.c2
-rw-r--r--arch/arm/mach-mx5/board-mx50_rdp.c2
-rw-r--r--arch/arm/mach-mx5/board-mx51_3ds.c2
-rw-r--r--arch/arm/mach-mx5/board-mx51_babbage.c2
-rw-r--r--arch/arm/mach-mx5/board-mx51_efikamx.c2
-rw-r--r--arch/arm/mach-mx5/board-mx51_efikasb.c2
-rw-r--r--arch/arm/mach-mx5/board-mx53_evk.c2
-rw-r--r--arch/arm/mach-mx5/board-mx53_loco.c2
-rw-r--r--arch/arm/mach-mx5/board-mx53_smd.c2
-rw-r--r--arch/arm/mach-mx5/clock-mx51-mx53.c11
-rw-r--r--arch/arm/mach-mx5/devices.c64
-rw-r--r--arch/arm/mach-mx5/mm-mx50.c22
-rw-r--r--arch/arm/mach-mx5/mm.c27
-rw-r--r--arch/arm/mach-mxs/Makefile2
-rw-r--r--arch/arm/mach-mxs/devices.c11
-rw-r--r--arch/arm/mach-mxs/devices/Makefile1
-rw-r--r--arch/arm/mach-mxs/devices/platform-auart.c1
-rw-r--r--arch/arm/mach-mxs/devices/platform-dma.c1
-rw-r--r--arch/arm/mach-mxs/devices/platform-fec.c1
-rw-r--r--arch/arm/mach-mxs/devices/platform-gpio-mxs.c53
-rw-r--r--arch/arm/mach-mxs/gpio.c331
-rw-r--r--arch/arm/mach-mxs/gpio.h34
-rw-r--r--arch/arm/mach-mxs/include/mach/devices-common.h2
-rw-r--r--arch/arm/mach-mxs/mach-mx28evk.c1
-rw-r--r--arch/arm/mach-mxs/mm-mx23.c1
-rw-r--r--arch/arm/mach-mxs/mm-mx28.c1
-rw-r--r--arch/arm/mach-omap1/board-ams-delta.c8
-rw-r--r--arch/arm/mach-omap1/gpio15xx.c26
-rw-r--r--arch/arm/mach-omap1/gpio16xx.c38
-rw-r--r--arch/arm/mach-omap1/gpio7xx.c41
-rw-r--r--arch/arm/mach-omap1/pm_bus.c14
-rw-r--r--arch/arm/mach-omap2/board-rx51-peripherals.c2
-rw-r--r--arch/arm/mach-omap2/gpio.c34
-rw-r--r--arch/arm/mach-omap2/serial.c1
-rw-r--r--arch/arm/mach-pxa/mfp-pxa2xx.c4
-rw-r--r--arch/arm/mach-pxa/raumfeld.c36
-rw-r--r--arch/arm/mach-s3c2410/include/mach/spi-gpio.h28
-rw-r--r--arch/arm/mach-s3c2410/mach-qt2410.c34
-rw-r--r--arch/arm/mach-s3c2412/mach-jive.c41
-rw-r--r--arch/arm/mach-s3c2440/mach-gta02.c1
-rw-r--r--arch/arm/mach-s3c2440/mach-mini2440.c2
-rw-r--r--arch/arm/mach-s3c64xx/dev-spi.c2
-rw-r--r--arch/arm/mach-s3c64xx/dma.c14
-rw-r--r--arch/arm/mach-s5p64x0/dev-spi.c4
-rw-r--r--arch/arm/mach-s5pc100/dev-spi.c4
-rw-r--r--arch/arm/mach-s5pv210/dev-spi.c2
-rw-r--r--arch/arm/mach-shmobile/board-ap4evb.c5
-rw-r--r--arch/arm/mach-shmobile/board-mackerel.c5
-rw-r--r--arch/arm/mach-shmobile/clock-sh7372.c1
-rw-r--r--arch/arm/mach-shmobile/include/mach/sh7372.h29
-rw-r--r--arch/arm/mach-shmobile/pm-sh7372.c160
-rw-r--r--arch/arm/mach-shmobile/pm_runtime.c22
-rw-r--r--arch/arm/mach-shmobile/setup-sh7372.c11
-rw-r--r--arch/arm/mach-tegra/Makefile1
-rw-r--r--arch/arm/mach-tegra/clock.c7
-rw-r--r--arch/arm/mach-tegra/gpio.c431
-rw-r--r--arch/arm/mach-ux500/clock.c31
-rw-r--r--arch/arm/mach-vt8500/irq.c21
-rw-r--r--arch/arm/mm/cache-l2x0.c19
-rw-r--r--arch/arm/mm/fault.c6
-rw-r--r--arch/arm/mm/mmu.c5
-rw-r--r--arch/arm/mm/nommu.c4
-rw-r--r--arch/arm/plat-mxc/Makefile2
-rw-r--r--arch/arm/plat-mxc/devices.c11
-rw-r--r--arch/arm/plat-mxc/devices/Makefile1
-rw-r--r--arch/arm/plat-mxc/devices/platform-fec.c1
-rw-r--r--arch/arm/plat-mxc/devices/platform-fsl-usb2-udc.c1
-rw-r--r--arch/arm/plat-mxc/devices/platform-gpio-mxc.c32
-rw-r--r--arch/arm/plat-mxc/devices/platform-imx-fb.c1
-rw-r--r--arch/arm/plat-mxc/devices/platform-ipu-core.c1
-rw-r--r--arch/arm/plat-mxc/devices/platform-mxc-ehci.c1
-rw-r--r--arch/arm/plat-mxc/devices/platform-mxc-mmc.c1
-rw-r--r--arch/arm/plat-mxc/devices/platform-spi_imx.c12
-rw-r--r--arch/arm/plat-mxc/gpio.c361
-rw-r--r--arch/arm/plat-mxc/include/mach/common.h12
-rw-r--r--arch/arm/plat-mxc/include/mach/devices-common.h2
-rw-r--r--arch/arm/plat-mxc/include/mach/gpio.h27
-rw-r--r--arch/arm/plat-mxc/include/mach/irqs.h21
-rw-r--r--arch/arm/plat-nomadik/include/plat/ste_dma40.h1
-rw-r--r--arch/arm/plat-omap/clock.c12
-rw-r--r--arch/arm/plat-omap/include/plat/gpio.h20
-rw-r--r--arch/arm/plat-omap/include/plat/omap_device.h9
-rw-r--r--arch/arm/plat-omap/omap_device.c53
-rw-r--r--arch/arm/plat-orion/gpio.c2
-rw-r--r--arch/arm/plat-pxa/gpio.c10
-rw-r--r--arch/arm/plat-pxa/include/plat/sdhci.h35
-rw-r--r--arch/arm/plat-s3c24xx/dma.c24
-rw-r--r--arch/arm/plat-s5p/irq-gpioint.c2
-rw-r--r--arch/arm/plat-s5p/s5p-time.c4
-rw-r--r--arch/arm/plat-samsung/clock.c7
-rw-r--r--arch/arm/plat-samsung/dma.c6
-rw-r--r--arch/arm/plat-samsung/include/plat/devs.h6
-rw-r--r--arch/arm/plat-samsung/include/plat/dma.h21
-rw-r--r--arch/arm/plat-samsung/include/plat/s3c64xx-spi.h2
-rw-r--r--arch/arm/plat-samsung/irq-uart.c9
-rw-r--r--arch/arm/plat-samsung/irq-vic-timer.c7
-rw-r--r--arch/arm/plat-spear/clock.c7
-rw-r--r--arch/blackfin/Kconfig10
-rw-r--r--arch/blackfin/configs/BF561-EZKIT_defconfig8
-rw-r--r--arch/blackfin/include/asm/Kbuild43
-rw-r--r--arch/blackfin/include/asm/atomic.h13
-rw-r--r--arch/blackfin/include/asm/auxvec.h1
-rw-r--r--arch/blackfin/include/asm/bitsperlong.h1
-rw-r--r--arch/blackfin/include/asm/blackfin.h6
-rw-r--r--arch/blackfin/include/asm/bugs.h1
-rw-r--r--arch/blackfin/include/asm/cputime.h1
-rw-r--r--arch/blackfin/include/asm/current.h1
-rw-r--r--arch/blackfin/include/asm/device.h1
-rw-r--r--arch/blackfin/include/asm/div64.h1
-rw-r--r--arch/blackfin/include/asm/dpmc.h27
-rw-r--r--arch/blackfin/include/asm/emergency-restart.h1
-rw-r--r--arch/blackfin/include/asm/errno.h1
-rw-r--r--arch/blackfin/include/asm/fb.h1
-rw-r--r--arch/blackfin/include/asm/futex.h1
-rw-r--r--arch/blackfin/include/asm/gpio.h64
-rw-r--r--arch/blackfin/include/asm/gptimers.h19
-rw-r--r--arch/blackfin/include/asm/hw_irq.h1
-rw-r--r--arch/blackfin/include/asm/ioctl.h1
-rw-r--r--arch/blackfin/include/asm/ipcbuf.h1
-rw-r--r--arch/blackfin/include/asm/irq_regs.h1
-rw-r--r--arch/blackfin/include/asm/irqflags.h42
-rw-r--r--arch/blackfin/include/asm/kdebug.h1
-rw-r--r--arch/blackfin/include/asm/kmap_types.h1
-rw-r--r--arch/blackfin/include/asm/local.h1
-rw-r--r--arch/blackfin/include/asm/local64.h1
-rw-r--r--arch/blackfin/include/asm/mman.h1
-rw-r--r--arch/blackfin/include/asm/module.h8
-rw-r--r--arch/blackfin/include/asm/msgbuf.h1
-rw-r--r--arch/blackfin/include/asm/mutex.h77
-rw-r--r--arch/blackfin/include/asm/page.h8
-rw-r--r--arch/blackfin/include/asm/param.h1
-rw-r--r--arch/blackfin/include/asm/pda.h10
-rw-r--r--arch/blackfin/include/asm/percpu.h1
-rw-r--r--arch/blackfin/include/asm/pgalloc.h1
-rw-r--r--arch/blackfin/include/asm/resource.h1
-rw-r--r--arch/blackfin/include/asm/scatterlist.h6
-rw-r--r--arch/blackfin/include/asm/sections.h8
-rw-r--r--arch/blackfin/include/asm/sembuf.h1
-rw-r--r--arch/blackfin/include/asm/serial.h1
-rw-r--r--arch/blackfin/include/asm/setup.h1
-rw-r--r--arch/blackfin/include/asm/shmbuf.h1
-rw-r--r--arch/blackfin/include/asm/shmparam.h1
-rw-r--r--arch/blackfin/include/asm/sigcontext.h8
-rw-r--r--arch/blackfin/include/asm/socket.h1
-rw-r--r--arch/blackfin/include/asm/sockios.h1
-rw-r--r--arch/blackfin/include/asm/spinlock.h8
-rw-r--r--arch/blackfin/include/asm/statfs.h1
-rw-r--r--arch/blackfin/include/asm/termbits.h1
-rw-r--r--arch/blackfin/include/asm/termios.h1
-rw-r--r--arch/blackfin/include/asm/topology.h1
-rw-r--r--arch/blackfin/include/asm/types.h1
-rw-r--r--arch/blackfin/include/asm/ucontext.h1
-rw-r--r--arch/blackfin/include/asm/unaligned.h1
-rw-r--r--arch/blackfin/include/asm/user.h1
-rw-r--r--arch/blackfin/include/asm/xor.h1
-rw-r--r--arch/blackfin/kernel/Makefile1
-rw-r--r--arch/blackfin/kernel/asm-offsets.c10
-rw-r--r--arch/blackfin/kernel/bfin_gpio.c26
-rw-r--r--arch/blackfin/kernel/debug-mmrs.c109
-rw-r--r--arch/blackfin/kernel/gptimers.c93
-rw-r--r--arch/blackfin/kernel/process.c1
-rw-r--r--arch/blackfin/kernel/pwm.c100
-rw-r--r--arch/blackfin/kernel/reboot.c4
-rw-r--r--arch/blackfin/kernel/setup.c16
-rw-r--r--arch/blackfin/kernel/time.c4
-rw-r--r--arch/blackfin/kernel/vmlinux.lds.S1
-rw-r--r--arch/blackfin/mach-bf518/Kconfig78
-rw-r--r--arch/blackfin/mach-bf518/boards/ezbrd.c59
-rw-r--r--arch/blackfin/mach-bf518/boards/tcm-bf518.c47
-rw-r--r--arch/blackfin/mach-bf518/include/mach/anomaly.h24
-rw-r--r--arch/blackfin/mach-bf518/include/mach/portmux.h54
-rw-r--r--arch/blackfin/mach-bf527/boards/ad7160eval.c19
-rw-r--r--arch/blackfin/mach-bf527/boards/cm_bf527.c55
-rw-r--r--arch/blackfin/mach-bf527/boards/ezbrd.c62
-rw-r--r--arch/blackfin/mach-bf527/boards/ezkit.c98
-rw-r--r--arch/blackfin/mach-bf527/boards/tll6527m.c70
-rw-r--r--arch/blackfin/mach-bf527/include/mach/anomaly.h34
-rw-r--r--arch/blackfin/mach-bf533/boards/H8606.c28
-rw-r--r--arch/blackfin/mach-bf533/boards/blackstamp.c10
-rw-r--r--arch/blackfin/mach-bf533/boards/cm_bf533.c29
-rw-r--r--arch/blackfin/mach-bf533/boards/ezkit.c36
-rw-r--r--arch/blackfin/mach-bf533/boards/ip0x.c1
-rw-r--r--arch/blackfin/mach-bf533/boards/stamp.c78
-rw-r--r--arch/blackfin/mach-bf533/include/mach/anomaly.h19
-rw-r--r--arch/blackfin/mach-bf537/boards/cm_bf537e.c51
-rw-r--r--arch/blackfin/mach-bf537/boards/cm_bf537u.c63
-rw-r--r--arch/blackfin/mach-bf537/boards/dnp5370.c2
-rw-r--r--arch/blackfin/mach-bf537/boards/minotaur.c2
-rw-r--r--arch/blackfin/mach-bf537/boards/pnav10.c38
-rw-r--r--arch/blackfin/mach-bf537/boards/stamp.c176
-rw-r--r--arch/blackfin/mach-bf537/boards/tcm_bf537.c51
-rw-r--r--arch/blackfin/mach-bf537/include/mach/anomaly.h34
-rw-r--r--arch/blackfin/mach-bf538/boards/ezkit.c25
-rw-r--r--arch/blackfin/mach-bf538/ext-gpio.c37
-rw-r--r--arch/blackfin/mach-bf538/include/mach/anomaly.h38
-rw-r--r--arch/blackfin/mach-bf538/include/mach/gpio.h3
-rw-r--r--arch/blackfin/mach-bf548/boards/cm_bf548.c15
-rw-r--r--arch/blackfin/mach-bf548/boards/ezkit.c32
-rw-r--r--arch/blackfin/mach-bf548/include/mach/anomaly.h220
-rw-r--r--arch/blackfin/mach-bf548/include/mach/gpio.h2
-rw-r--r--arch/blackfin/mach-bf548/include/mach/irq.h2
-rw-r--r--arch/blackfin/mach-bf561/boards/acvilon.c9
-rw-r--r--arch/blackfin/mach-bf561/boards/cm_bf561.c58
-rw-r--r--arch/blackfin/mach-bf561/boards/ezkit.c41
-rw-r--r--arch/blackfin/mach-bf561/include/mach/anomaly.h132
-rw-r--r--arch/blackfin/mach-bf561/include/mach/gpio.h6
-rw-r--r--arch/blackfin/mach-bf561/secondary.S152
-rw-r--r--arch/blackfin/mach-common/dpmc_modes.S1016
-rw-r--r--arch/blackfin/mach-common/head.S36
-rw-r--r--arch/blackfin/mach-common/ints-priority.c41
-rw-r--r--arch/blackfin/mach-common/smp.c17
-rw-r--r--arch/h8300/Kconfig.cpu4
-rw-r--r--arch/ia64/Kconfig27
-rw-r--r--arch/ia64/include/asm/clocksource.h10
-rw-r--r--arch/ia64/kernel/cyclone.c2
-rw-r--r--arch/ia64/kernel/time.c2
-rw-r--r--arch/ia64/kvm/Kconfig1
-rw-r--r--arch/ia64/sn/kernel/irq.c14
-rw-r--r--arch/ia64/sn/kernel/sn2/timer.c2
-rw-r--r--arch/m32r/Kconfig12
-rw-r--r--arch/m68k/emu/nfeth.c2
-rw-r--r--arch/microblaze/include/asm/pci-bridge.h13
-rw-r--r--arch/microblaze/include/asm/pci.h3
-rw-r--r--arch/microblaze/include/asm/prom.h15
-rw-r--r--arch/microblaze/pci/Makefile2
-rw-r--r--arch/microblaze/pci/pci-common.c112
-rw-r--r--arch/microblaze/pci/pci_32.c432
-rw-r--r--arch/mips/Kconfig20
-rw-r--r--arch/mips/cobalt/time.c2
-rw-r--r--arch/mips/include/asm/i8253.h24
-rw-r--r--arch/mips/include/asm/stacktrace.h4
-rw-r--r--arch/mips/jazz/irq.c2
-rw-r--r--arch/mips/kernel/i8253.c102
-rw-r--r--arch/mips/kernel/i8259.c22
-rw-r--r--arch/mips/kernel/perf_event.c2
-rw-r--r--arch/mips/kernel/perf_event_mipsxx.c28
-rw-r--r--arch/mips/kernel/process.c19
-rw-r--r--arch/mips/kernel/traps.c8
-rw-r--r--arch/mips/kernel/unaligned.c5
-rw-r--r--arch/mips/math-emu/cp1emu.c3
-rw-r--r--arch/mips/mm/fault.c8
-rw-r--r--arch/mips/mti-malta/malta-time.c2
-rw-r--r--arch/mips/oprofile/Makefile2
-rw-r--r--arch/mips/oprofile/backtrace.c175
-rw-r--r--arch/mips/oprofile/common.c1
-rw-r--r--arch/mips/oprofile/op_impl.h2
-rw-r--r--arch/mips/sgi-ip22/ip22-time.c2
-rw-r--r--arch/mips/sni/time.c2
-rw-r--r--arch/powerpc/Kconfig1
-rw-r--r--arch/powerpc/Makefile3
-rw-r--r--arch/powerpc/include/asm/8253pit.h3
-rw-r--r--arch/powerpc/include/asm/emulated_ops.h4
-rw-r--r--arch/powerpc/include/asm/hw_breakpoint.h2
-rw-r--r--arch/powerpc/include/asm/pci-bridge.h29
-rw-r--r--arch/powerpc/include/asm/pci.h3
-rw-r--r--arch/powerpc/include/asm/ppc-opcode.h40
-rw-r--r--arch/powerpc/include/asm/prom.h14
-rw-r--r--arch/powerpc/kernel/e500-pmu.c5
-rw-r--r--arch/powerpc/kernel/mpc7450-pmu.c5
-rw-r--r--arch/powerpc/kernel/pci-common.c11
-rw-r--r--arch/powerpc/kernel/pci_32.c150
-rw-r--r--arch/powerpc/kernel/pci_dn.c47
-rw-r--r--arch/powerpc/kernel/pci_of_scan.c9
-rw-r--r--arch/powerpc/kernel/perf_event.c6
-rw-r--r--arch/powerpc/kernel/perf_event_fsl_emb.c6
-rw-r--r--arch/powerpc/kernel/power4-pmu.c5
-rw-r--r--arch/powerpc/kernel/power5+-pmu.c5
-rw-r--r--arch/powerpc/kernel/power5-pmu.c5
-rw-r--r--arch/powerpc/kernel/power6-pmu.c5
-rw-r--r--arch/powerpc/kernel/power7-pmu.c5
-rw-r--r--arch/powerpc/kernel/ppc970-pmu.c5
-rw-r--r--arch/powerpc/kernel/ptrace.c4
-rw-r--r--arch/powerpc/kernel/time.c2
-rw-r--r--arch/powerpc/kvm/Kconfig1
-rw-r--r--arch/powerpc/mm/fault.c6
-rw-r--r--arch/powerpc/net/Makefile4
-rw-r--r--arch/powerpc/net/bpf_jit.h227
-rw-r--r--arch/powerpc/net/bpf_jit_64.S138
-rw-r--r--arch/powerpc/net/bpf_jit_comp.c694
-rw-r--r--arch/powerpc/platforms/52xx/Kconfig8
-rw-r--r--arch/powerpc/platforms/52xx/Makefile1
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_gpio.c380
-rw-r--r--arch/powerpc/platforms/amigaone/Kconfig2
-rw-r--r--arch/powerpc/platforms/cell/spufs/file.c11
-rw-r--r--arch/powerpc/platforms/cell/spufs/inode.c29
-rw-r--r--arch/powerpc/platforms/cell/spufs/spufs.h2
-rw-r--r--arch/powerpc/platforms/cell/spufs/syscalls.c22
-rw-r--r--arch/powerpc/platforms/chrp/Kconfig1
-rw-r--r--arch/powerpc/platforms/powermac/pci.c3
-rw-r--r--arch/powerpc/platforms/prep/Kconfig1
-rw-r--r--arch/powerpc/platforms/pseries/Kconfig1
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-memory.c30
-rw-r--r--arch/s390/crypto/sha256_s390.c66
-rw-r--r--arch/s390/kernel/traps.c4
-rw-r--r--arch/s390/kvm/Kconfig1
-rw-r--r--arch/s390/mm/fault.c6
-rw-r--r--arch/sh/Kconfig16
-rw-r--r--arch/sh/kernel/cpu/sh4/perf_event.c15
-rw-r--r--arch/sh/kernel/cpu/sh4a/perf_event.c15
-rw-r--r--arch/sh/kernel/cpu/shmobile/pm_runtime.c6
-rw-r--r--arch/sh/kernel/ptrace_32.c5
-rw-r--r--arch/sh/kernel/traps_32.c2
-rw-r--r--arch/sh/kernel/traps_64.c8
-rw-r--r--arch/sh/math-emu/math.c2
-rw-r--r--arch/sh/mm/fault_32.c6
-rw-r--r--arch/sh/mm/tlbflush_64.c6
-rw-r--r--arch/sparc/include/asm/irqflags_32.h8
-rw-r--r--arch/sparc/include/asm/irqflags_64.h14
-rw-r--r--arch/sparc/include/asm/pci_32.h3
-rw-r--r--arch/sparc/include/asm/pci_64.h3
-rw-r--r--arch/sparc/include/asm/ptrace.h1
-rw-r--r--arch/sparc/kernel/entry.S2
-rw-r--r--arch/sparc/kernel/pci.c8
-rw-r--r--arch/sparc/kernel/pcic.c8
-rw-r--r--arch/sparc/kernel/perf_event.c44
-rw-r--r--arch/sparc/kernel/unaligned_32.c4
-rw-r--r--arch/sparc/kernel/unaligned_64.c12
-rw-r--r--arch/sparc/kernel/visemul.c2
-rw-r--r--arch/sparc/math-emu/math_32.c2
-rw-r--r--arch/sparc/math-emu/math_64.c2
-rw-r--r--arch/sparc/mm/fault_32.c8
-rw-r--r--arch/sparc/mm/fault_64.c8
-rw-r--r--arch/sparc/mm/leon_mm.c2
-rw-r--r--arch/tile/kvm/Kconfig1
-rw-r--r--arch/um/sys-i386/Makefile3
-rw-r--r--arch/um/sys-x86_64/Makefile2
-rw-r--r--arch/x86/Kconfig103
-rw-r--r--arch/x86/Kconfig.cpu3
-rw-r--r--arch/x86/boot/Makefile9
-rw-r--r--arch/x86/boot/tools/build.c33
-rw-r--r--arch/x86/crypto/ghash-clmulni-intel_glue.c2
-rw-r--r--arch/x86/ia32/ia32_signal.c22
-rw-r--r--arch/x86/ia32/ia32entry.S10
-rw-r--r--arch/x86/include/asm/alternative-asm.h4
-rw-r--r--arch/x86/include/asm/alternative.h8
-rw-r--r--arch/x86/include/asm/amd_iommu.h35
-rw-r--r--arch/x86/include/asm/amd_iommu_proto.h54
-rw-r--r--arch/x86/include/asm/amd_iommu_types.h580
-rw-r--r--arch/x86/include/asm/apb_timer.h23
-rw-r--r--arch/x86/include/asm/asm.h5
-rw-r--r--arch/x86/include/asm/calling.h130
-rw-r--r--arch/x86/include/asm/clocksource.h18
-rw-r--r--arch/x86/include/asm/cmpxchg_32.h48
-rw-r--r--arch/x86/include/asm/cmpxchg_64.h45
-rw-r--r--arch/x86/include/asm/cpufeature.h10
-rw-r--r--arch/x86/include/asm/entry_arch.h4
-rw-r--r--arch/x86/include/asm/fixmap.h1
-rw-r--r--arch/x86/include/asm/frame.h11
-rw-r--r--arch/x86/include/asm/hw_irq.h1
-rw-r--r--arch/x86/include/asm/i8253.h20
-rw-r--r--arch/x86/include/asm/irq_vectors.h11
-rw-r--r--arch/x86/include/asm/irqflags.h11
-rw-r--r--arch/x86/include/asm/lguest_hcall.h1
-rw-r--r--arch/x86/include/asm/mce.h19
-rw-r--r--arch/x86/include/asm/mmzone_32.h8
-rw-r--r--arch/x86/include/asm/msr-index.h3
-rw-r--r--arch/x86/include/asm/percpu.h11
-rw-r--r--arch/x86/include/asm/perf_event.h5
-rw-r--r--arch/x86/include/asm/perf_event_p4.h33
-rw-r--r--arch/x86/include/asm/pgtable_types.h6
-rw-r--r--arch/x86/include/asm/prom.h11
-rw-r--r--arch/x86/include/asm/rwlock.h43
-rw-r--r--arch/x86/include/asm/segment.h2
-rw-r--r--arch/x86/include/asm/smpboot_hooks.h8
-rw-r--r--arch/x86/include/asm/spinlock.h37
-rw-r--r--arch/x86/include/asm/spinlock_types.h6
-rw-r--r--arch/x86/include/asm/time.h6
-rw-r--r--arch/x86/include/asm/traps.h4
-rw-r--r--arch/x86/include/asm/tsc.h4
-rw-r--r--arch/x86/include/asm/uaccess.h3
-rw-r--r--arch/x86/include/asm/uv/uv_bau.h59
-rw-r--r--arch/x86/include/asm/uv/uv_mmrs.h2889
-rw-r--r--arch/x86/include/asm/vgtod.h3
-rw-r--r--arch/x86/include/asm/vsyscall.h4
-rw-r--r--arch/x86/include/asm/vvar.h24
-rw-r--r--arch/x86/include/asm/xen/hypercall.h22
-rw-r--r--arch/x86/include/asm/xen/pci.h5
-rw-r--r--arch/x86/include/asm/xen/trace_types.h18
-rw-r--r--arch/x86/kernel/Makefile9
-rw-r--r--arch/x86/kernel/acpi/realmode/wakeup.S14
-rw-r--r--arch/x86/kernel/acpi/realmode/wakeup.h6
-rw-r--r--arch/x86/kernel/acpi/sleep.c6
-rw-r--r--arch/x86/kernel/alternative.c23
-rw-r--r--arch/x86/kernel/amd_iommu.c2764
-rw-r--r--arch/x86/kernel/amd_iommu_init.c1572
-rw-r--r--arch/x86/kernel/apb_timer.c410
-rw-r--r--arch/x86/kernel/apic/apic.c27
-rw-r--r--arch/x86/kernel/apic/io_apic.c91
-rw-r--r--arch/x86/kernel/apm_32.c8
-rw-r--r--arch/x86/kernel/asm-offsets_32.c1
-rw-r--r--arch/x86/kernel/cpu/bugs.c4
-rw-r--r--arch/x86/kernel/cpu/hypervisor.c4
-rw-r--r--arch/x86/kernel/cpu/intel.c18
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce-severity.c152
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c288
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce_amd.c10
-rw-r--r--arch/x86/kernel/cpu/mtrr/main.c182
-rw-r--r--arch/x86/kernel/cpu/perf_event.c168
-rw-r--r--arch/x86/kernel/cpu/perf_event_amd.c14
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c385
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_ds.c4
-rw-r--r--arch/x86/kernel/cpu/perf_event_p4.c119
-rw-r--r--arch/x86/kernel/devicetree.c60
-rw-r--r--arch/x86/kernel/dumpstack_64.c37
-rw-r--r--arch/x86/kernel/entry_64.S84
-rw-r--r--arch/x86/kernel/hpet.c14
-rw-r--r--arch/x86/kernel/i8253.c99
-rw-r--r--arch/x86/kernel/irqinit.c3
-rw-r--r--arch/x86/kernel/kgdb.c4
-rw-r--r--arch/x86/kernel/microcode_amd.c21
-rw-r--r--arch/x86/kernel/ptrace.c5
-rw-r--r--arch/x86/kernel/quirks.c5
-rw-r--r--arch/x86/kernel/reboot.c32
-rw-r--r--arch/x86/kernel/relocate_kernel_32.S2
-rw-r--r--arch/x86/kernel/relocate_kernel_64.S2
-rw-r--r--arch/x86/kernel/signal.c56
-rw-r--r--arch/x86/kernel/smpboot.c2
-rw-r--r--arch/x86/kernel/stacktrace.c2
-rw-r--r--arch/x86/kernel/tboot.c1
-rw-r--r--arch/x86/kernel/time.c2
-rw-r--r--arch/x86/kernel/traps.c6
-rw-r--r--arch/x86/kernel/tsc.c26
-rw-r--r--arch/x86/kernel/vmlinux.lds.S49
-rw-r--r--arch/x86/kernel/vread_tsc_64.c36
-rw-r--r--arch/x86/kernel/vsyscall_64.c310
-rw-r--r--arch/x86/kernel/vsyscall_emu_64.S27
-rw-r--r--arch/x86/kvm/Kconfig1
-rw-r--r--arch/x86/lguest/boot.c36
-rw-r--r--arch/x86/lguest/i386_head.S35
-rw-r--r--arch/x86/lib/Makefile9
-rw-r--r--arch/x86/lib/copy_page_64.S9
-rw-r--r--arch/x86/lib/memmove_64.S11
-rw-r--r--arch/x86/lib/rwlock.S44
-rw-r--r--arch/x86/lib/rwlock_64.S38
-rw-r--r--arch/x86/lib/rwsem.S (renamed from arch/x86/lib/rwsem_64.S)75
-rw-r--r--arch/x86/lib/semaphore_32.S124
-rw-r--r--arch/x86/lib/thunk_64.S45
-rw-r--r--arch/x86/lib/usercopy.c43
-rw-r--r--arch/x86/mm/fault.c6
-rw-r--r--arch/x86/mm/init_64.c3
-rw-r--r--arch/x86/mm/kmemcheck/error.c2
-rw-r--r--arch/x86/mm/numa.c15
-rw-r--r--arch/x86/mm/numa_32.c6
-rw-r--r--arch/x86/mm/pageattr-test.c3
-rw-r--r--arch/x86/oprofile/backtrace.c21
-rw-r--r--arch/x86/oprofile/nmi_int.c14
-rw-r--r--arch/x86/pci/mmconfig-shared.c3
-rw-r--r--arch/x86/pci/xen.c409
-rw-r--r--arch/x86/platform/efi/efi.c93
-rw-r--r--arch/x86/platform/uv/tlb_uv.c69
-rw-r--r--arch/x86/vdso/Makefile1
-rw-r--r--arch/x86/vdso/vclock_gettime.c103
-rw-r--r--arch/x86/vdso/vdso.S15
-rw-r--r--arch/x86/vdso/vma.c58
-rw-r--r--arch/x86/xen/Makefile4
-rw-r--r--arch/x86/xen/enlighten.c24
-rw-r--r--arch/x86/xen/mmu.c139
-rw-r--r--arch/x86/xen/multicalls.c169
-rw-r--r--arch/x86/xen/multicalls.h6
-rw-r--r--arch/x86/xen/platform-pci-unplug.c2
-rw-r--r--arch/x86/xen/trace.c61
-rw-r--r--arch/x86/xen/vga.c67
-rw-r--r--arch/x86/xen/xen-ops.h11
-rw-r--r--arch/xtensa/Kconfig13
552 files changed, 9324 insertions, 16575 deletions
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 60219bf94198..ca2da8da6e9c 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -6,6 +6,7 @@ config ALPHA
6 select HAVE_OPROFILE 6 select HAVE_OPROFILE
7 select HAVE_SYSCALL_WRAPPERS 7 select HAVE_SYSCALL_WRAPPERS
8 select HAVE_IRQ_WORK 8 select HAVE_IRQ_WORK
9 select HAVE_PCSPKR_PLATFORM
9 select HAVE_PERF_EVENTS 10 select HAVE_PERF_EVENTS
10 select HAVE_DMA_ATTRS 11 select HAVE_DMA_ATTRS
11 select HAVE_GENERIC_HARDIRQS 12 select HAVE_GENERIC_HARDIRQS
diff --git a/arch/alpha/include/asm/8253pit.h b/arch/alpha/include/asm/8253pit.h
deleted file mode 100644
index a71c9c1455a7..000000000000
--- a/arch/alpha/include/asm/8253pit.h
+++ /dev/null
@@ -1,3 +0,0 @@
1/*
2 * 8253/8254 Programmable Interval Timer
3 */
diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c
index 90561c45e7d8..8e47709160f8 100644
--- a/arch/alpha/kernel/perf_event.c
+++ b/arch/alpha/kernel/perf_event.c
@@ -847,7 +847,7 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
847 data.period = event->hw.last_period; 847 data.period = event->hw.last_period;
848 848
849 if (alpha_perf_event_set_period(event, hwc, idx)) { 849 if (alpha_perf_event_set_period(event, hwc, idx)) {
850 if (perf_event_overflow(event, 1, &data, regs)) { 850 if (perf_event_overflow(event, &data, regs)) {
851 /* Interrupts coming too quickly; "throttle" the 851 /* Interrupts coming too quickly; "throttle" the
852 * counter, i.e., disable it for a little while. 852 * counter, i.e., disable it for a little while.
853 */ 853 */
diff --git a/arch/alpha/kernel/sys_ruffian.c b/arch/alpha/kernel/sys_ruffian.c
index 8de1046fe91e..f33648e4e8cf 100644
--- a/arch/alpha/kernel/sys_ruffian.c
+++ b/arch/alpha/kernel/sys_ruffian.c
@@ -26,7 +26,6 @@
26#include <asm/pgtable.h> 26#include <asm/pgtable.h>
27#include <asm/core_cia.h> 27#include <asm/core_cia.h>
28#include <asm/tlbflush.h> 28#include <asm/tlbflush.h>
29#include <asm/8253pit.h>
30 29
31#include "proto.h" 30#include "proto.h"
32#include "irq_impl.h" 31#include "irq_impl.h"
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index 818e74ed45dc..e336694ca042 100644
--- a/arch/alpha/kernel/time.c
+++ b/arch/alpha/kernel/time.c
@@ -46,7 +46,6 @@
46#include <asm/uaccess.h> 46#include <asm/uaccess.h>
47#include <asm/io.h> 47#include <asm/io.h>
48#include <asm/hwrpb.h> 48#include <asm/hwrpb.h>
49#include <asm/8253pit.h>
50#include <asm/rtc.h> 49#include <asm/rtc.h>
51 50
52#include <linux/mc146818rtc.h> 51#include <linux/mc146818rtc.h>
@@ -91,7 +90,7 @@ DEFINE_PER_CPU(u8, irq_work_pending);
91#define test_irq_work_pending() __get_cpu_var(irq_work_pending) 90#define test_irq_work_pending() __get_cpu_var(irq_work_pending)
92#define clear_irq_work_pending() __get_cpu_var(irq_work_pending) = 0 91#define clear_irq_work_pending() __get_cpu_var(irq_work_pending) = 0
93 92
94void set_irq_work_pending(void) 93void arch_irq_work_raise(void)
95{ 94{
96 set_irq_work_pending_flag(); 95 set_irq_work_pending_flag();
97} 96}
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 9adc278a22ab..e04fa9d7637c 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -642,6 +642,7 @@ config ARCH_SHMOBILE
642 select NO_IOPORT 642 select NO_IOPORT
643 select SPARSE_IRQ 643 select SPARSE_IRQ
644 select MULTI_IRQ_HANDLER 644 select MULTI_IRQ_HANDLER
645 select PM_GENERIC_DOMAINS if PM
645 help 646 help
646 Support for Renesas's SH-Mobile and R-Mobile ARM platforms. 647 Support for Renesas's SH-Mobile and R-Mobile ARM platforms.
647 648
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index e5681636626f..841df7d21c2f 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -255,7 +255,7 @@ static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
255 if (buf == 0) { 255 if (buf == 0) {
256 dev_err(dev, "%s: unable to map unsafe buffer %p!\n", 256 dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
257 __func__, ptr); 257 __func__, ptr);
258 return 0; 258 return ~0;
259 } 259 }
260 260
261 dev_dbg(dev, 261 dev_dbg(dev,
diff --git a/arch/arm/configs/mmp2_defconfig b/arch/arm/configs/mmp2_defconfig
index 47ad3b1a4fee..5a584520db2f 100644
--- a/arch/arm/configs/mmp2_defconfig
+++ b/arch/arm/configs/mmp2_defconfig
@@ -8,6 +8,7 @@ CONFIG_MODULE_UNLOAD=y
8CONFIG_MODULE_FORCE_UNLOAD=y 8CONFIG_MODULE_FORCE_UNLOAD=y
9# CONFIG_BLK_DEV_BSG is not set 9# CONFIG_BLK_DEV_BSG is not set
10CONFIG_ARCH_MMP=y 10CONFIG_ARCH_MMP=y
11CONFIG_MACH_BROWNSTONE=y
11CONFIG_MACH_FLINT=y 12CONFIG_MACH_FLINT=y
12CONFIG_MACH_MARVELL_JASPER=y 13CONFIG_MACH_MARVELL_JASPER=y
13CONFIG_HIGH_RES_TIMERS=y 14CONFIG_HIGH_RES_TIMERS=y
@@ -63,10 +64,16 @@ CONFIG_BACKLIGHT_MAX8925=y
63# CONFIG_USB_SUPPORT is not set 64# CONFIG_USB_SUPPORT is not set
64CONFIG_RTC_CLASS=y 65CONFIG_RTC_CLASS=y
65CONFIG_RTC_DRV_MAX8925=y 66CONFIG_RTC_DRV_MAX8925=y
67CONFIG_MMC=y
66# CONFIG_DNOTIFY is not set 68# CONFIG_DNOTIFY is not set
67CONFIG_INOTIFY=y 69CONFIG_INOTIFY=y
68CONFIG_TMPFS=y 70CONFIG_TMPFS=y
69CONFIG_TMPFS_POSIX_ACL=y 71CONFIG_TMPFS_POSIX_ACL=y
72CONFIG_EXT2_FS=y
73CONFIG_EXT3_FS=y
74CONFIG_EXT4_FS=y
75CONFIG_MSDOS_FS=y
76CONFIG_FAT_DEFAULT_CODEPAGE=437
70CONFIG_JFFS2_FS=y 77CONFIG_JFFS2_FS=y
71CONFIG_CRAMFS=y 78CONFIG_CRAMFS=y
72CONFIG_NFS_FS=y 79CONFIG_NFS_FS=y
@@ -81,7 +88,7 @@ CONFIG_DEBUG_KERNEL=y
81# CONFIG_DEBUG_PREEMPT is not set 88# CONFIG_DEBUG_PREEMPT is not set
82CONFIG_DEBUG_INFO=y 89CONFIG_DEBUG_INFO=y
83# CONFIG_RCU_CPU_STALL_DETECTOR is not set 90# CONFIG_RCU_CPU_STALL_DETECTOR is not set
84CONFIG_DYNAMIC_DEBUG=y 91# CONFIG_DYNAMIC_DEBUG is not set
85CONFIG_DEBUG_USER=y 92CONFIG_DEBUG_USER=y
86CONFIG_DEBUG_ERRORS=y 93CONFIG_DEBUG_ERRORS=y
87# CONFIG_CRYPTO_ANSI_CPRNG is not set 94# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/arm/include/asm/i8253.h b/arch/arm/include/asm/i8253.h
deleted file mode 100644
index 70656b69d5ce..000000000000
--- a/arch/arm/include/asm/i8253.h
+++ /dev/null
@@ -1,15 +0,0 @@
1#ifndef __ASMARM_I8253_H
2#define __ASMARM_I8253_H
3
4/* i8253A PIT registers */
5#define PIT_MODE 0x43
6#define PIT_CH0 0x40
7
8#define PIT_LATCH ((PIT_TICK_RATE + HZ / 2) / HZ)
9
10extern raw_spinlock_t i8253_lock;
11
12#define outb_pit outb_p
13#define inb_pit inb_p
14
15#endif
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index d53c0abc4dd3..2b5b1421596c 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -583,7 +583,7 @@ static int armpmu_event_init(struct perf_event *event)
583static void armpmu_enable(struct pmu *pmu) 583static void armpmu_enable(struct pmu *pmu)
584{ 584{
585 /* Enable all of the perf events on hardware. */ 585 /* Enable all of the perf events on hardware. */
586 int idx; 586 int idx, enabled = 0;
587 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 587 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
588 588
589 if (!armpmu) 589 if (!armpmu)
@@ -596,9 +596,11 @@ static void armpmu_enable(struct pmu *pmu)
596 continue; 596 continue;
597 597
598 armpmu->enable(&event->hw, idx); 598 armpmu->enable(&event->hw, idx);
599 enabled = 1;
599 } 600 }
600 601
601 armpmu->start(); 602 if (enabled)
603 armpmu->start();
602} 604}
603 605
604static void armpmu_disable(struct pmu *pmu) 606static void armpmu_disable(struct pmu *pmu)
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index f1e8dd94afe8..dd7f3b9f4cb3 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -173,6 +173,20 @@ static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
173 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, 173 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
174 }, 174 },
175 }, 175 },
176 [C(NODE)] = {
177 [C(OP_READ)] = {
178 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
179 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
180 },
181 [C(OP_WRITE)] = {
182 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
183 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
184 },
185 [C(OP_PREFETCH)] = {
186 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
187 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
188 },
189 },
176}; 190};
177 191
178enum armv6mpcore_perf_types { 192enum armv6mpcore_perf_types {
@@ -310,6 +324,20 @@ static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
310 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, 324 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
311 }, 325 },
312 }, 326 },
327 [C(NODE)] = {
328 [C(OP_READ)] = {
329 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
330 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
331 },
332 [C(OP_WRITE)] = {
333 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
334 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
335 },
336 [C(OP_PREFETCH)] = {
337 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
338 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
339 },
340 },
313}; 341};
314 342
315static inline unsigned long 343static inline unsigned long
@@ -479,7 +507,7 @@ armv6pmu_handle_irq(int irq_num,
479 if (!armpmu_event_set_period(event, hwc, idx)) 507 if (!armpmu_event_set_period(event, hwc, idx))
480 continue; 508 continue;
481 509
482 if (perf_event_overflow(event, 0, &data, regs)) 510 if (perf_event_overflow(event, &data, regs))
483 armpmu->disable(hwc, idx); 511 armpmu->disable(hwc, idx);
484 } 512 }
485 513
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 4960686afb58..e20ca9cafef5 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -255,6 +255,20 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
255 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, 255 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
256 }, 256 },
257 }, 257 },
258 [C(NODE)] = {
259 [C(OP_READ)] = {
260 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
261 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
262 },
263 [C(OP_WRITE)] = {
264 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
265 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
266 },
267 [C(OP_PREFETCH)] = {
268 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
269 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
270 },
271 },
258}; 272};
259 273
260/* 274/*
@@ -371,6 +385,20 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
371 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, 385 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
372 }, 386 },
373 }, 387 },
388 [C(NODE)] = {
389 [C(OP_READ)] = {
390 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
391 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
392 },
393 [C(OP_WRITE)] = {
394 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
395 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
396 },
397 [C(OP_PREFETCH)] = {
398 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
399 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
400 },
401 },
374}; 402};
375 403
376/* 404/*
@@ -787,7 +815,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
787 if (!armpmu_event_set_period(event, hwc, idx)) 815 if (!armpmu_event_set_period(event, hwc, idx))
788 continue; 816 continue;
789 817
790 if (perf_event_overflow(event, 0, &data, regs)) 818 if (perf_event_overflow(event, &data, regs))
791 armpmu->disable(hwc, idx); 819 armpmu->disable(hwc, idx);
792 } 820 }
793 821
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 39affbe4fdb2..3c4397491d08 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -144,6 +144,20 @@ static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
144 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, 144 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
145 }, 145 },
146 }, 146 },
147 [C(NODE)] = {
148 [C(OP_READ)] = {
149 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
150 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
151 },
152 [C(OP_WRITE)] = {
153 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
154 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
155 },
156 [C(OP_PREFETCH)] = {
157 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
158 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
159 },
160 },
147}; 161};
148 162
149#define XSCALE_PMU_ENABLE 0x001 163#define XSCALE_PMU_ENABLE 0x001
@@ -251,7 +265,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
251 if (!armpmu_event_set_period(event, hwc, idx)) 265 if (!armpmu_event_set_period(event, hwc, idx))
252 continue; 266 continue;
253 267
254 if (perf_event_overflow(event, 0, &data, regs)) 268 if (perf_event_overflow(event, &data, regs))
255 armpmu->disable(hwc, idx); 269 armpmu->disable(hwc, idx);
256 } 270 }
257 271
@@ -583,7 +597,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
583 if (!armpmu_event_set_period(event, hwc, idx)) 597 if (!armpmu_event_set_period(event, hwc, idx))
584 continue; 598 continue;
585 599
586 if (perf_event_overflow(event, 0, &data, regs)) 600 if (perf_event_overflow(event, &data, regs))
587 armpmu->disable(hwc, idx); 601 armpmu->disable(hwc, idx);
588 } 602 }
589 603
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 97260060bf26..5c199610719f 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -396,7 +396,7 @@ static long ptrace_hbp_idx_to_num(int idx)
396/* 396/*
397 * Handle hitting a HW-breakpoint. 397 * Handle hitting a HW-breakpoint.
398 */ 398 */
399static void ptrace_hbptriggered(struct perf_event *bp, int unused, 399static void ptrace_hbptriggered(struct perf_event *bp,
400 struct perf_sample_data *data, 400 struct perf_sample_data *data,
401 struct pt_regs *regs) 401 struct pt_regs *regs)
402{ 402{
@@ -479,7 +479,8 @@ static struct perf_event *ptrace_hbp_create(struct task_struct *tsk, int type)
479 attr.bp_type = type; 479 attr.bp_type = type;
480 attr.disabled = 1; 480 attr.disabled = 1;
481 481
482 return register_user_hw_breakpoint(&attr, ptrace_hbptriggered, tsk); 482 return register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL,
483 tsk);
483} 484}
484 485
485static int ptrace_gethbpregs(struct task_struct *tsk, long num, 486static int ptrace_gethbpregs(struct task_struct *tsk, long num,
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index ed11fb08b05a..acbb447ac6b5 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -73,6 +73,7 @@ __setup("fpe=", fpe_setup);
73#endif 73#endif
74 74
75extern void paging_init(struct machine_desc *desc); 75extern void paging_init(struct machine_desc *desc);
76extern void sanity_check_meminfo(void);
76extern void reboot_setup(char *str); 77extern void reboot_setup(char *str);
77 78
78unsigned int processor_id; 79unsigned int processor_id;
@@ -900,6 +901,7 @@ void __init setup_arch(char **cmdline_p)
900 901
901 parse_early_param(); 902 parse_early_param();
902 903
904 sanity_check_meminfo();
903 arm_memblock_init(&meminfo, mdesc); 905 arm_memblock_init(&meminfo, mdesc);
904 906
905 paging_init(mdesc); 907 paging_init(mdesc);
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index 60636f499cb3..2c277d40cee6 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -115,7 +115,7 @@ static void __cpuinit twd_calibrate_rate(void)
115 twd_timer_rate = (0xFFFFFFFFU - count) * (HZ / 5); 115 twd_timer_rate = (0xFFFFFFFFU - count) * (HZ / 5);
116 116
117 printk("%lu.%02luMHz.\n", twd_timer_rate / 1000000, 117 printk("%lu.%02luMHz.\n", twd_timer_rate / 1000000,
118 (twd_timer_rate / 1000000) % 100); 118 (twd_timer_rate / 10000) % 100);
119 } 119 }
120} 120}
121 121
diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c
index 40ee7e5045e4..5f452f8fde05 100644
--- a/arch/arm/kernel/swp_emulate.c
+++ b/arch/arm/kernel/swp_emulate.c
@@ -183,7 +183,7 @@ static int swp_handler(struct pt_regs *regs, unsigned int instr)
183 unsigned int address, destreg, data, type; 183 unsigned int address, destreg, data, type;
184 unsigned int res = 0; 184 unsigned int res = 0;
185 185
186 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, regs->ARM_pc); 186 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->ARM_pc);
187 187
188 if (current->pid != previous_pid) { 188 if (current->pid != previous_pid) {
189 pr_debug("\"%s\" (%ld) uses deprecated SWP{B} instruction\n", 189 pr_debug("\"%s\" (%ld) uses deprecated SWP{B} instruction\n",
diff --git a/arch/arm/mach-at91/include/mach/at91_mci.h b/arch/arm/mach-at91/include/mach/at91_mci.h
deleted file mode 100644
index 02182c16a022..000000000000
--- a/arch/arm/mach-at91/include/mach/at91_mci.h
+++ /dev/null
@@ -1,115 +0,0 @@
1/*
2 * arch/arm/mach-at91/include/mach/at91_mci.h
3 *
4 * Copyright (C) 2005 Ivan Kokshaysky
5 * Copyright (C) SAN People
6 *
7 * MultiMedia Card Interface (MCI) registers.
8 * Based on AT91RM9200 datasheet revision F.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 */
15
16#ifndef AT91_MCI_H
17#define AT91_MCI_H
18
19#define AT91_MCI_CR 0x00 /* Control Register */
20#define AT91_MCI_MCIEN (1 << 0) /* Multi-Media Interface Enable */
21#define AT91_MCI_MCIDIS (1 << 1) /* Multi-Media Interface Disable */
22#define AT91_MCI_PWSEN (1 << 2) /* Power Save Mode Enable */
23#define AT91_MCI_PWSDIS (1 << 3) /* Power Save Mode Disable */
24#define AT91_MCI_SWRST (1 << 7) /* Software Reset */
25
26#define AT91_MCI_MR 0x04 /* Mode Register */
27#define AT91_MCI_CLKDIV (0xff << 0) /* Clock Divider */
28#define AT91_MCI_PWSDIV (7 << 8) /* Power Saving Divider */
29#define AT91_MCI_RDPROOF (1 << 11) /* Read Proof Enable [SAM926[03] only] */
30#define AT91_MCI_WRPROOF (1 << 12) /* Write Proof Enable [SAM926[03] only] */
31#define AT91_MCI_PDCFBYTE (1 << 13) /* PDC Force Byte Transfer [SAM926[03] only] */
32#define AT91_MCI_PDCPADV (1 << 14) /* PDC Padding Value */
33#define AT91_MCI_PDCMODE (1 << 15) /* PDC-orientated Mode */
34#define AT91_MCI_BLKLEN (0xfff << 18) /* Data Block Length */
35
36#define AT91_MCI_DTOR 0x08 /* Data Timeout Register */
37#define AT91_MCI_DTOCYC (0xf << 0) /* Data Timeout Cycle Number */
38#define AT91_MCI_DTOMUL (7 << 4) /* Data Timeout Multiplier */
39#define AT91_MCI_DTOMUL_1 (0 << 4)
40#define AT91_MCI_DTOMUL_16 (1 << 4)
41#define AT91_MCI_DTOMUL_128 (2 << 4)
42#define AT91_MCI_DTOMUL_256 (3 << 4)
43#define AT91_MCI_DTOMUL_1K (4 << 4)
44#define AT91_MCI_DTOMUL_4K (5 << 4)
45#define AT91_MCI_DTOMUL_64K (6 << 4)
46#define AT91_MCI_DTOMUL_1M (7 << 4)
47
48#define AT91_MCI_SDCR 0x0c /* SD Card Register */
49#define AT91_MCI_SDCSEL (3 << 0) /* SD Card Selector */
50#define AT91_MCI_SDCBUS (1 << 7) /* 1-bit or 4-bit bus */
51
52#define AT91_MCI_ARGR 0x10 /* Argument Register */
53
54#define AT91_MCI_CMDR 0x14 /* Command Register */
55#define AT91_MCI_CMDNB (0x3f << 0) /* Command Number */
56#define AT91_MCI_RSPTYP (3 << 6) /* Response Type */
57#define AT91_MCI_RSPTYP_NONE (0 << 6)
58#define AT91_MCI_RSPTYP_48 (1 << 6)
59#define AT91_MCI_RSPTYP_136 (2 << 6)
60#define AT91_MCI_SPCMD (7 << 8) /* Special Command */
61#define AT91_MCI_SPCMD_NONE (0 << 8)
62#define AT91_MCI_SPCMD_INIT (1 << 8)
63#define AT91_MCI_SPCMD_SYNC (2 << 8)
64#define AT91_MCI_SPCMD_ICMD (4 << 8)
65#define AT91_MCI_SPCMD_IRESP (5 << 8)
66#define AT91_MCI_OPDCMD (1 << 11) /* Open Drain Command */
67#define AT91_MCI_MAXLAT (1 << 12) /* Max Latency for Command to Response */
68#define AT91_MCI_TRCMD (3 << 16) /* Transfer Command */
69#define AT91_MCI_TRCMD_NONE (0 << 16)
70#define AT91_MCI_TRCMD_START (1 << 16)
71#define AT91_MCI_TRCMD_STOP (2 << 16)
72#define AT91_MCI_TRDIR (1 << 18) /* Transfer Direction */
73#define AT91_MCI_TRTYP (3 << 19) /* Transfer Type */
74#define AT91_MCI_TRTYP_BLOCK (0 << 19)
75#define AT91_MCI_TRTYP_MULTIPLE (1 << 19)
76#define AT91_MCI_TRTYP_STREAM (2 << 19)
77#define AT91_MCI_TRTYP_SDIO_BYTE (4 << 19)
78#define AT91_MCI_TRTYP_SDIO_BLOCK (5 << 19)
79
80#define AT91_MCI_BLKR 0x18 /* Block Register */
81#define AT91_MCI_BLKR_BCNT(n) ((0xffff & (n)) << 0) /* Block count */
82#define AT91_MCI_BLKR_BLKLEN(n) ((0xffff & (n)) << 16) /* Block length */
83
84#define AT91_MCI_RSPR(n) (0x20 + ((n) * 4)) /* Response Registers 0-3 */
85#define AT91_MCR_RDR 0x30 /* Receive Data Register */
86#define AT91_MCR_TDR 0x34 /* Transmit Data Register */
87
88#define AT91_MCI_SR 0x40 /* Status Register */
89#define AT91_MCI_CMDRDY (1 << 0) /* Command Ready */
90#define AT91_MCI_RXRDY (1 << 1) /* Receiver Ready */
91#define AT91_MCI_TXRDY (1 << 2) /* Transmit Ready */
92#define AT91_MCI_BLKE (1 << 3) /* Data Block Ended */
93#define AT91_MCI_DTIP (1 << 4) /* Data Transfer in Progress */
94#define AT91_MCI_NOTBUSY (1 << 5) /* Data Not Busy */
95#define AT91_MCI_ENDRX (1 << 6) /* End of RX Buffer */
96#define AT91_MCI_ENDTX (1 << 7) /* End fo TX Buffer */
97#define AT91_MCI_SDIOIRQA (1 << 8) /* SDIO Interrupt for Slot A */
98#define AT91_MCI_SDIOIRQB (1 << 9) /* SDIO Interrupt for Slot B */
99#define AT91_MCI_RXBUFF (1 << 14) /* RX Buffer Full */
100#define AT91_MCI_TXBUFE (1 << 15) /* TX Buffer Empty */
101#define AT91_MCI_RINDE (1 << 16) /* Response Index Error */
102#define AT91_MCI_RDIRE (1 << 17) /* Response Direction Error */
103#define AT91_MCI_RCRCE (1 << 18) /* Response CRC Error */
104#define AT91_MCI_RENDE (1 << 19) /* Response End Bit Error */
105#define AT91_MCI_RTOE (1 << 20) /* Response Time-out Error */
106#define AT91_MCI_DCRCE (1 << 21) /* Data CRC Error */
107#define AT91_MCI_DTOE (1 << 22) /* Data Time-out Error */
108#define AT91_MCI_OVRE (1 << 30) /* Overrun */
109#define AT91_MCI_UNRE (1 << 31) /* Underrun */
110
111#define AT91_MCI_IER 0x44 /* Interrupt Enable Register */
112#define AT91_MCI_IDR 0x48 /* Interrupt Disable Register */
113#define AT91_MCI_IMR 0x4c /* Interrupt Mask Register */
114
115#endif
diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c
index c67f684ee3e5..09a87e61ffcf 100644
--- a/arch/arm/mach-davinci/board-dm365-evm.c
+++ b/arch/arm/mach-davinci/board-dm365-evm.c
@@ -520,7 +520,7 @@ fail:
520 */ 520 */
521 if (have_imager()) { 521 if (have_imager()) {
522 label = "HD imager"; 522 label = "HD imager";
523 mux |= 1; 523 mux |= 2;
524 524
525 /* externally mux MMC1/ENET/AIC33 to imager */ 525 /* externally mux MMC1/ENET/AIC33 to imager */
526 mux |= BIT(6) | BIT(5) | BIT(3); 526 mux |= BIT(6) | BIT(5) | BIT(3);
@@ -540,7 +540,7 @@ fail:
540 resets &= ~BIT(1); 540 resets &= ~BIT(1);
541 541
542 if (have_tvp7002()) { 542 if (have_tvp7002()) {
543 mux |= 2; 543 mux |= 1;
544 resets &= ~BIT(2); 544 resets &= ~BIT(2);
545 label = "tvp7002 HD"; 545 label = "tvp7002 HD";
546 } else { 546 } else {
diff --git a/arch/arm/mach-davinci/board-mityomapl138.c b/arch/arm/mach-davinci/board-mityomapl138.c
index 606a6f27ed6c..5f5d78308873 100644
--- a/arch/arm/mach-davinci/board-mityomapl138.c
+++ b/arch/arm/mach-davinci/board-mityomapl138.c
@@ -20,6 +20,7 @@
20#include <linux/spi/spi.h> 20#include <linux/spi/spi.h>
21#include <linux/spi/flash.h> 21#include <linux/spi/flash.h>
22 22
23#include <asm/io.h>
23#include <asm/mach-types.h> 24#include <asm/mach-types.h>
24#include <asm/mach/arch.h> 25#include <asm/mach/arch.h>
25#include <mach/common.h> 26#include <mach/common.h>
diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c
index 1e0f809644bb..e00d61e2efbe 100644
--- a/arch/arm/mach-davinci/dm646x.c
+++ b/arch/arm/mach-davinci/dm646x.c
@@ -8,6 +8,7 @@
8 * is licensed "as is" without any warranty of any kind, whether express 8 * is licensed "as is" without any warranty of any kind, whether express
9 * or implied. 9 * or implied.
10 */ 10 */
11#include <linux/dma-mapping.h>
11#include <linux/init.h> 12#include <linux/init.h>
12#include <linux/clk.h> 13#include <linux/clk.h>
13#include <linux/serial_8250.h> 14#include <linux/serial_8250.h>
diff --git a/arch/arm/mach-davinci/gpio.c b/arch/arm/mach-davinci/gpio.c
index e7221398e5af..cafbe13a82a5 100644
--- a/arch/arm/mach-davinci/gpio.c
+++ b/arch/arm/mach-davinci/gpio.c
@@ -254,8 +254,10 @@ gpio_irq_handler(unsigned irq, struct irq_desc *desc)
254{ 254{
255 struct davinci_gpio_regs __iomem *g; 255 struct davinci_gpio_regs __iomem *g;
256 u32 mask = 0xffff; 256 u32 mask = 0xffff;
257 struct davinci_gpio_controller *d;
257 258
258 g = (__force struct davinci_gpio_regs __iomem *) irq_desc_get_handler_data(desc); 259 d = (struct davinci_gpio_controller *)irq_desc_get_handler_data(desc);
260 g = (struct davinci_gpio_regs __iomem *)d->regs;
259 261
260 /* we only care about one bank */ 262 /* we only care about one bank */
261 if (irq & 1) 263 if (irq & 1)
@@ -274,11 +276,14 @@ gpio_irq_handler(unsigned irq, struct irq_desc *desc)
274 if (!status) 276 if (!status)
275 break; 277 break;
276 __raw_writel(status, &g->intstat); 278 __raw_writel(status, &g->intstat);
277 if (irq & 1)
278 status >>= 16;
279 279
280 /* now demux them to the right lowlevel handler */ 280 /* now demux them to the right lowlevel handler */
281 n = (int)irq_get_handler_data(irq); 281 n = d->irq_base;
282 if (irq & 1) {
283 n += 16;
284 status >>= 16;
285 }
286
282 while (status) { 287 while (status) {
283 res = ffs(status); 288 res = ffs(status);
284 n += res; 289 n += res;
@@ -424,7 +429,13 @@ static int __init davinci_gpio_irq_setup(void)
424 429
425 /* set up all irqs in this bank */ 430 /* set up all irqs in this bank */
426 irq_set_chained_handler(bank_irq, gpio_irq_handler); 431 irq_set_chained_handler(bank_irq, gpio_irq_handler);
427 irq_set_handler_data(bank_irq, (__force void *)g); 432
433 /*
434 * Each chip handles 32 gpios, and each irq bank consists of 16
435 * gpio irqs. Pass the irq bank's corresponding controller to
436 * the chained irq handler.
437 */
438 irq_set_handler_data(bank_irq, &chips[gpio / 32]);
428 439
429 for (i = 0; i < 16 && gpio < ngpio; i++, irq++, gpio++) { 440 for (i = 0; i < 16 && gpio < ngpio; i++, irq++, gpio++) {
430 irq_set_chip(irq, &gpio_irqchip); 441 irq_set_chip(irq, &gpio_irqchip);
diff --git a/arch/arm/mach-davinci/irq.c b/arch/arm/mach-davinci/irq.c
index bfe68ec4e1a6..952dc126c390 100644
--- a/arch/arm/mach-davinci/irq.c
+++ b/arch/arm/mach-davinci/irq.c
@@ -52,8 +52,14 @@ davinci_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num)
52 struct irq_chip_type *ct; 52 struct irq_chip_type *ct;
53 53
54 gc = irq_alloc_generic_chip("AINTC", 1, irq_start, base, handle_edge_irq); 54 gc = irq_alloc_generic_chip("AINTC", 1, irq_start, base, handle_edge_irq);
55 if (!gc) {
56 pr_err("%s: irq_alloc_generic_chip for IRQ %u failed\n",
57 __func__, irq_start);
58 return;
59 }
60
55 ct = gc->chip_types; 61 ct = gc->chip_types;
56 ct->chip.irq_ack = irq_gc_ack; 62 ct->chip.irq_ack = irq_gc_ack_set_bit;
57 ct->chip.irq_mask = irq_gc_mask_clr_bit; 63 ct->chip.irq_mask = irq_gc_mask_clr_bit;
58 ct->chip.irq_unmask = irq_gc_mask_set_bit; 64 ct->chip.irq_unmask = irq_gc_mask_set_bit;
59 65
diff --git a/arch/arm/mach-davinci/pm.c b/arch/arm/mach-davinci/pm.c
index 1bd73a04be20..04c49f7543ef 100644
--- a/arch/arm/mach-davinci/pm.c
+++ b/arch/arm/mach-davinci/pm.c
@@ -17,6 +17,7 @@
17 17
18#include <asm/cacheflush.h> 18#include <asm/cacheflush.h>
19#include <asm/delay.h> 19#include <asm/delay.h>
20#include <asm/io.h>
20 21
21#include <mach/da8xx.h> 22#include <mach/da8xx.h>
22#include <mach/sram.h> 23#include <mach/sram.h>
diff --git a/arch/arm/mach-ep93xx/Makefile b/arch/arm/mach-ep93xx/Makefile
index 33ee2c863d18..3cedcf2d39e5 100644
--- a/arch/arm/mach-ep93xx/Makefile
+++ b/arch/arm/mach-ep93xx/Makefile
@@ -1,11 +1,13 @@
1# 1#
2# Makefile for the linux kernel. 2# Makefile for the linux kernel.
3# 3#
4obj-y := core.o clock.o dma-m2p.o gpio.o 4obj-y := core.o clock.o
5obj-m := 5obj-m :=
6obj-n := 6obj-n :=
7obj- := 7obj- :=
8 8
9obj-$(CONFIG_EP93XX_DMA) += dma.o
10
9obj-$(CONFIG_MACH_ADSSPHERE) += adssphere.o 11obj-$(CONFIG_MACH_ADSSPHERE) += adssphere.o
10obj-$(CONFIG_MACH_EDB93XX) += edb93xx.o 12obj-$(CONFIG_MACH_EDB93XX) += edb93xx.o
11obj-$(CONFIG_MACH_GESBC9312) += gesbc9312.o 13obj-$(CONFIG_MACH_GESBC9312) += gesbc9312.o
diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c
index 1d4b65fd673e..c60f081e930b 100644
--- a/arch/arm/mach-ep93xx/core.c
+++ b/arch/arm/mach-ep93xx/core.c
@@ -174,14 +174,10 @@ struct sys_timer ep93xx_timer = {
174/************************************************************************* 174/*************************************************************************
175 * EP93xx IRQ handling 175 * EP93xx IRQ handling
176 *************************************************************************/ 176 *************************************************************************/
177extern void ep93xx_gpio_init_irq(void);
178
179void __init ep93xx_init_irq(void) 177void __init ep93xx_init_irq(void)
180{ 178{
181 vic_init(EP93XX_VIC1_BASE, 0, EP93XX_VIC1_VALID_IRQ_MASK, 0); 179 vic_init(EP93XX_VIC1_BASE, 0, EP93XX_VIC1_VALID_IRQ_MASK, 0);
182 vic_init(EP93XX_VIC2_BASE, 32, EP93XX_VIC2_VALID_IRQ_MASK, 0); 180 vic_init(EP93XX_VIC2_BASE, 32, EP93XX_VIC2_VALID_IRQ_MASK, 0);
183
184 ep93xx_gpio_init_irq();
185} 181}
186 182
187 183
@@ -241,6 +237,24 @@ unsigned int ep93xx_chip_revision(void)
241} 237}
242 238
243/************************************************************************* 239/*************************************************************************
240 * EP93xx GPIO
241 *************************************************************************/
242static struct resource ep93xx_gpio_resource[] = {
243 {
244 .start = EP93XX_GPIO_PHYS_BASE,
245 .end = EP93XX_GPIO_PHYS_BASE + 0xcc - 1,
246 .flags = IORESOURCE_MEM,
247 },
248};
249
250static struct platform_device ep93xx_gpio_device = {
251 .name = "gpio-ep93xx",
252 .id = -1,
253 .num_resources = ARRAY_SIZE(ep93xx_gpio_resource),
254 .resource = ep93xx_gpio_resource,
255};
256
257/*************************************************************************
244 * EP93xx peripheral handling 258 * EP93xx peripheral handling
245 *************************************************************************/ 259 *************************************************************************/
246#define EP93XX_UART_MCR_OFFSET (0x0100) 260#define EP93XX_UART_MCR_OFFSET (0x0100)
@@ -251,9 +265,9 @@ static void ep93xx_uart_set_mctrl(struct amba_device *dev,
251 unsigned int mcr; 265 unsigned int mcr;
252 266
253 mcr = 0; 267 mcr = 0;
254 if (!(mctrl & TIOCM_RTS)) 268 if (mctrl & TIOCM_RTS)
255 mcr |= 2; 269 mcr |= 2;
256 if (!(mctrl & TIOCM_DTR)) 270 if (mctrl & TIOCM_DTR)
257 mcr |= 1; 271 mcr |= 1;
258 272
259 __raw_writel(mcr, base + EP93XX_UART_MCR_OFFSET); 273 __raw_writel(mcr, base + EP93XX_UART_MCR_OFFSET);
@@ -492,11 +506,15 @@ static struct resource ep93xx_spi_resources[] = {
492 }, 506 },
493}; 507};
494 508
509static u64 ep93xx_spi_dma_mask = DMA_BIT_MASK(32);
510
495static struct platform_device ep93xx_spi_device = { 511static struct platform_device ep93xx_spi_device = {
496 .name = "ep93xx-spi", 512 .name = "ep93xx-spi",
497 .id = 0, 513 .id = 0,
498 .dev = { 514 .dev = {
499 .platform_data = &ep93xx_spi_master_data, 515 .platform_data = &ep93xx_spi_master_data,
516 .coherent_dma_mask = DMA_BIT_MASK(32),
517 .dma_mask = &ep93xx_spi_dma_mask,
500 }, 518 },
501 .num_resources = ARRAY_SIZE(ep93xx_spi_resources), 519 .num_resources = ARRAY_SIZE(ep93xx_spi_resources),
502 .resource = ep93xx_spi_resources, 520 .resource = ep93xx_spi_resources,
@@ -870,14 +888,13 @@ void __init ep93xx_register_ac97(void)
870 platform_device_register(&ep93xx_pcm_device); 888 platform_device_register(&ep93xx_pcm_device);
871} 889}
872 890
873extern void ep93xx_gpio_init(void);
874
875void __init ep93xx_init_devices(void) 891void __init ep93xx_init_devices(void)
876{ 892{
877 /* Disallow access to MaverickCrunch initially */ 893 /* Disallow access to MaverickCrunch initially */
878 ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_CPENA); 894 ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_CPENA);
879 895
880 ep93xx_gpio_init(); 896 /* Get the GPIO working early, other devices need it */
897 platform_device_register(&ep93xx_gpio_device);
881 898
882 amba_device_register(&uart1_device, &iomem_resource); 899 amba_device_register(&uart1_device, &iomem_resource);
883 amba_device_register(&uart2_device, &iomem_resource); 900 amba_device_register(&uart2_device, &iomem_resource);
diff --git a/arch/arm/mach-ep93xx/dma-m2p.c b/arch/arm/mach-ep93xx/dma-m2p.c
deleted file mode 100644
index a696d354b1f8..000000000000
--- a/arch/arm/mach-ep93xx/dma-m2p.c
+++ /dev/null
@@ -1,411 +0,0 @@
1/*
2 * arch/arm/mach-ep93xx/dma-m2p.c
3 * M2P DMA handling for Cirrus EP93xx chips.
4 *
5 * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
6 * Copyright (C) 2006 Applied Data Systems
7 *
8 * Copyright (C) 2009 Ryan Mallon <ryan@bluewatersys.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or (at
13 * your option) any later version.
14 */
15
16/*
17 * On the EP93xx chip the following peripherals my be allocated to the 10
18 * Memory to Internal Peripheral (M2P) channels (5 transmit + 5 receive).
19 *
20 * I2S contains 3 Tx and 3 Rx DMA Channels
21 * AAC contains 3 Tx and 3 Rx DMA Channels
22 * UART1 contains 1 Tx and 1 Rx DMA Channels
23 * UART2 contains 1 Tx and 1 Rx DMA Channels
24 * UART3 contains 1 Tx and 1 Rx DMA Channels
25 * IrDA contains 1 Tx and 1 Rx DMA Channels
26 *
27 * SSP and IDE use the Memory to Memory (M2M) channels and are not covered
28 * with this implementation.
29 */
30
31#define pr_fmt(fmt) "ep93xx " KBUILD_MODNAME ": " fmt
32
33#include <linux/kernel.h>
34#include <linux/clk.h>
35#include <linux/err.h>
36#include <linux/interrupt.h>
37#include <linux/module.h>
38#include <linux/io.h>
39
40#include <mach/dma.h>
41#include <mach/hardware.h>
42
43#define M2P_CONTROL 0x00
44#define M2P_CONTROL_STALL_IRQ_EN (1 << 0)
45#define M2P_CONTROL_NFB_IRQ_EN (1 << 1)
46#define M2P_CONTROL_ERROR_IRQ_EN (1 << 3)
47#define M2P_CONTROL_ENABLE (1 << 4)
48#define M2P_INTERRUPT 0x04
49#define M2P_INTERRUPT_STALL (1 << 0)
50#define M2P_INTERRUPT_NFB (1 << 1)
51#define M2P_INTERRUPT_ERROR (1 << 3)
52#define M2P_PPALLOC 0x08
53#define M2P_STATUS 0x0c
54#define M2P_REMAIN 0x14
55#define M2P_MAXCNT0 0x20
56#define M2P_BASE0 0x24
57#define M2P_MAXCNT1 0x30
58#define M2P_BASE1 0x34
59
60#define STATE_IDLE 0 /* Channel is inactive. */
61#define STATE_STALL 1 /* Channel is active, no buffers pending. */
62#define STATE_ON 2 /* Channel is active, one buffer pending. */
63#define STATE_NEXT 3 /* Channel is active, two buffers pending. */
64
65struct m2p_channel {
66 char *name;
67 void __iomem *base;
68 int irq;
69
70 struct clk *clk;
71 spinlock_t lock;
72
73 void *client;
74 unsigned next_slot:1;
75 struct ep93xx_dma_buffer *buffer_xfer;
76 struct ep93xx_dma_buffer *buffer_next;
77 struct list_head buffers_pending;
78};
79
80static struct m2p_channel m2p_rx[] = {
81 {"m2p1", EP93XX_DMA_BASE + 0x0040, IRQ_EP93XX_DMAM2P1},
82 {"m2p3", EP93XX_DMA_BASE + 0x00c0, IRQ_EP93XX_DMAM2P3},
83 {"m2p5", EP93XX_DMA_BASE + 0x0200, IRQ_EP93XX_DMAM2P5},
84 {"m2p7", EP93XX_DMA_BASE + 0x0280, IRQ_EP93XX_DMAM2P7},
85 {"m2p9", EP93XX_DMA_BASE + 0x0300, IRQ_EP93XX_DMAM2P9},
86 {NULL},
87};
88
89static struct m2p_channel m2p_tx[] = {
90 {"m2p0", EP93XX_DMA_BASE + 0x0000, IRQ_EP93XX_DMAM2P0},
91 {"m2p2", EP93XX_DMA_BASE + 0x0080, IRQ_EP93XX_DMAM2P2},
92 {"m2p4", EP93XX_DMA_BASE + 0x0240, IRQ_EP93XX_DMAM2P4},
93 {"m2p6", EP93XX_DMA_BASE + 0x02c0, IRQ_EP93XX_DMAM2P6},
94 {"m2p8", EP93XX_DMA_BASE + 0x0340, IRQ_EP93XX_DMAM2P8},
95 {NULL},
96};
97
98static void feed_buf(struct m2p_channel *ch, struct ep93xx_dma_buffer *buf)
99{
100 if (ch->next_slot == 0) {
101 writel(buf->size, ch->base + M2P_MAXCNT0);
102 writel(buf->bus_addr, ch->base + M2P_BASE0);
103 } else {
104 writel(buf->size, ch->base + M2P_MAXCNT1);
105 writel(buf->bus_addr, ch->base + M2P_BASE1);
106 }
107 ch->next_slot ^= 1;
108}
109
110static void choose_buffer_xfer(struct m2p_channel *ch)
111{
112 struct ep93xx_dma_buffer *buf;
113
114 ch->buffer_xfer = NULL;
115 if (!list_empty(&ch->buffers_pending)) {
116 buf = list_entry(ch->buffers_pending.next,
117 struct ep93xx_dma_buffer, list);
118 list_del(&buf->list);
119 feed_buf(ch, buf);
120 ch->buffer_xfer = buf;
121 }
122}
123
124static void choose_buffer_next(struct m2p_channel *ch)
125{
126 struct ep93xx_dma_buffer *buf;
127
128 ch->buffer_next = NULL;
129 if (!list_empty(&ch->buffers_pending)) {
130 buf = list_entry(ch->buffers_pending.next,
131 struct ep93xx_dma_buffer, list);
132 list_del(&buf->list);
133 feed_buf(ch, buf);
134 ch->buffer_next = buf;
135 }
136}
137
138static inline void m2p_set_control(struct m2p_channel *ch, u32 v)
139{
140 /*
141 * The control register must be read immediately after being written so
142 * that the internal state machine is correctly updated. See the ep93xx
143 * users' guide for details.
144 */
145 writel(v, ch->base + M2P_CONTROL);
146 readl(ch->base + M2P_CONTROL);
147}
148
149static inline int m2p_channel_state(struct m2p_channel *ch)
150{
151 return (readl(ch->base + M2P_STATUS) >> 4) & 0x3;
152}
153
154static irqreturn_t m2p_irq(int irq, void *dev_id)
155{
156 struct m2p_channel *ch = dev_id;
157 struct ep93xx_dma_m2p_client *cl;
158 u32 irq_status, v;
159 int error = 0;
160
161 cl = ch->client;
162
163 spin_lock(&ch->lock);
164 irq_status = readl(ch->base + M2P_INTERRUPT);
165
166 if (irq_status & M2P_INTERRUPT_ERROR) {
167 writel(M2P_INTERRUPT_ERROR, ch->base + M2P_INTERRUPT);
168 error = 1;
169 }
170
171 if ((irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)) == 0) {
172 spin_unlock(&ch->lock);
173 return IRQ_NONE;
174 }
175
176 switch (m2p_channel_state(ch)) {
177 case STATE_IDLE:
178 pr_crit("dma interrupt without a dma buffer\n");
179 BUG();
180 break;
181
182 case STATE_STALL:
183 cl->buffer_finished(cl->cookie, ch->buffer_xfer, 0, error);
184 if (ch->buffer_next != NULL) {
185 cl->buffer_finished(cl->cookie, ch->buffer_next,
186 0, error);
187 }
188 choose_buffer_xfer(ch);
189 choose_buffer_next(ch);
190 if (ch->buffer_xfer != NULL)
191 cl->buffer_started(cl->cookie, ch->buffer_xfer);
192 break;
193
194 case STATE_ON:
195 cl->buffer_finished(cl->cookie, ch->buffer_xfer, 0, error);
196 ch->buffer_xfer = ch->buffer_next;
197 choose_buffer_next(ch);
198 cl->buffer_started(cl->cookie, ch->buffer_xfer);
199 break;
200
201 case STATE_NEXT:
202 pr_crit("dma interrupt while next\n");
203 BUG();
204 break;
205 }
206
207 v = readl(ch->base + M2P_CONTROL) & ~(M2P_CONTROL_STALL_IRQ_EN |
208 M2P_CONTROL_NFB_IRQ_EN);
209 if (ch->buffer_xfer != NULL)
210 v |= M2P_CONTROL_STALL_IRQ_EN;
211 if (ch->buffer_next != NULL)
212 v |= M2P_CONTROL_NFB_IRQ_EN;
213 m2p_set_control(ch, v);
214
215 spin_unlock(&ch->lock);
216 return IRQ_HANDLED;
217}
218
219static struct m2p_channel *find_free_channel(struct ep93xx_dma_m2p_client *cl)
220{
221 struct m2p_channel *ch;
222 int i;
223
224 if (cl->flags & EP93XX_DMA_M2P_RX)
225 ch = m2p_rx;
226 else
227 ch = m2p_tx;
228
229 for (i = 0; ch[i].base; i++) {
230 struct ep93xx_dma_m2p_client *client;
231
232 client = ch[i].client;
233 if (client != NULL) {
234 int port;
235
236 port = cl->flags & EP93XX_DMA_M2P_PORT_MASK;
237 if (port == (client->flags &
238 EP93XX_DMA_M2P_PORT_MASK)) {
239 pr_warning("DMA channel already used by %s\n",
240 cl->name ? : "unknown client");
241 return ERR_PTR(-EBUSY);
242 }
243 }
244 }
245
246 for (i = 0; ch[i].base; i++) {
247 if (ch[i].client == NULL)
248 return ch + i;
249 }
250
251 pr_warning("No free DMA channel for %s\n",
252 cl->name ? : "unknown client");
253 return ERR_PTR(-ENODEV);
254}
255
256static void channel_enable(struct m2p_channel *ch)
257{
258 struct ep93xx_dma_m2p_client *cl = ch->client;
259 u32 v;
260
261 clk_enable(ch->clk);
262
263 v = cl->flags & EP93XX_DMA_M2P_PORT_MASK;
264 writel(v, ch->base + M2P_PPALLOC);
265
266 v = cl->flags & EP93XX_DMA_M2P_ERROR_MASK;
267 v |= M2P_CONTROL_ENABLE | M2P_CONTROL_ERROR_IRQ_EN;
268 m2p_set_control(ch, v);
269}
270
271static void channel_disable(struct m2p_channel *ch)
272{
273 u32 v;
274
275 v = readl(ch->base + M2P_CONTROL);
276 v &= ~(M2P_CONTROL_STALL_IRQ_EN | M2P_CONTROL_NFB_IRQ_EN);
277 m2p_set_control(ch, v);
278
279 while (m2p_channel_state(ch) >= STATE_ON)
280 cpu_relax();
281
282 m2p_set_control(ch, 0x0);
283
284 while (m2p_channel_state(ch) == STATE_STALL)
285 cpu_relax();
286
287 clk_disable(ch->clk);
288}
289
290int ep93xx_dma_m2p_client_register(struct ep93xx_dma_m2p_client *cl)
291{
292 struct m2p_channel *ch;
293 int err;
294
295 ch = find_free_channel(cl);
296 if (IS_ERR(ch))
297 return PTR_ERR(ch);
298
299 err = request_irq(ch->irq, m2p_irq, 0, cl->name ? : "dma-m2p", ch);
300 if (err)
301 return err;
302
303 ch->client = cl;
304 ch->next_slot = 0;
305 ch->buffer_xfer = NULL;
306 ch->buffer_next = NULL;
307 INIT_LIST_HEAD(&ch->buffers_pending);
308
309 cl->channel = ch;
310
311 channel_enable(ch);
312
313 return 0;
314}
315EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_client_register);
316
317void ep93xx_dma_m2p_client_unregister(struct ep93xx_dma_m2p_client *cl)
318{
319 struct m2p_channel *ch = cl->channel;
320
321 channel_disable(ch);
322 free_irq(ch->irq, ch);
323 ch->client = NULL;
324}
325EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_client_unregister);
326
327void ep93xx_dma_m2p_submit(struct ep93xx_dma_m2p_client *cl,
328 struct ep93xx_dma_buffer *buf)
329{
330 struct m2p_channel *ch = cl->channel;
331 unsigned long flags;
332 u32 v;
333
334 spin_lock_irqsave(&ch->lock, flags);
335 v = readl(ch->base + M2P_CONTROL);
336 if (ch->buffer_xfer == NULL) {
337 ch->buffer_xfer = buf;
338 feed_buf(ch, buf);
339 cl->buffer_started(cl->cookie, buf);
340
341 v |= M2P_CONTROL_STALL_IRQ_EN;
342 m2p_set_control(ch, v);
343
344 } else if (ch->buffer_next == NULL) {
345 ch->buffer_next = buf;
346 feed_buf(ch, buf);
347
348 v |= M2P_CONTROL_NFB_IRQ_EN;
349 m2p_set_control(ch, v);
350 } else {
351 list_add_tail(&buf->list, &ch->buffers_pending);
352 }
353 spin_unlock_irqrestore(&ch->lock, flags);
354}
355EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_submit);
356
357void ep93xx_dma_m2p_submit_recursive(struct ep93xx_dma_m2p_client *cl,
358 struct ep93xx_dma_buffer *buf)
359{
360 struct m2p_channel *ch = cl->channel;
361
362 list_add_tail(&buf->list, &ch->buffers_pending);
363}
364EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_submit_recursive);
365
366void ep93xx_dma_m2p_flush(struct ep93xx_dma_m2p_client *cl)
367{
368 struct m2p_channel *ch = cl->channel;
369
370 channel_disable(ch);
371 ch->next_slot = 0;
372 ch->buffer_xfer = NULL;
373 ch->buffer_next = NULL;
374 INIT_LIST_HEAD(&ch->buffers_pending);
375 channel_enable(ch);
376}
377EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_flush);
378
379static int init_channel(struct m2p_channel *ch)
380{
381 ch->clk = clk_get(NULL, ch->name);
382 if (IS_ERR(ch->clk))
383 return PTR_ERR(ch->clk);
384
385 spin_lock_init(&ch->lock);
386 ch->client = NULL;
387
388 return 0;
389}
390
391static int __init ep93xx_dma_m2p_init(void)
392{
393 int i;
394 int ret;
395
396 for (i = 0; m2p_rx[i].base; i++) {
397 ret = init_channel(m2p_rx + i);
398 if (ret)
399 return ret;
400 }
401
402 for (i = 0; m2p_tx[i].base; i++) {
403 ret = init_channel(m2p_tx + i);
404 if (ret)
405 return ret;
406 }
407
408 pr_info("M2P DMA subsystem initialized\n");
409 return 0;
410}
411arch_initcall(ep93xx_dma_m2p_init);
diff --git a/arch/arm/mach-ep93xx/dma.c b/arch/arm/mach-ep93xx/dma.c
new file mode 100644
index 000000000000..5a2570881255
--- /dev/null
+++ b/arch/arm/mach-ep93xx/dma.c
@@ -0,0 +1,108 @@
1/*
2 * arch/arm/mach-ep93xx/dma.c
3 *
4 * Platform support code for the EP93xx dmaengine driver.
5 *
6 * Copyright (C) 2011 Mika Westerberg
7 *
8 * This work is based on the original dma-m2p implementation with
9 * following copyrights:
10 *
11 * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
12 * Copyright (C) 2006 Applied Data Systems
13 * Copyright (C) 2009 Ryan Mallon <rmallon@gmail.com>
14 *
15 * This program is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License as published by
17 * the Free Software Foundation; either version 2 of the License, or (at
18 * your option) any later version.
19 */
20
21#include <linux/dmaengine.h>
22#include <linux/dma-mapping.h>
23#include <linux/init.h>
24#include <linux/interrupt.h>
25#include <linux/kernel.h>
26#include <linux/platform_device.h>
27
28#include <mach/dma.h>
29#include <mach/hardware.h>
30
31#define DMA_CHANNEL(_name, _base, _irq) \
32 { .name = (_name), .base = (_base), .irq = (_irq) }
33
34/*
35 * DMA M2P channels.
36 *
37 * On the EP93xx chip the following peripherals my be allocated to the 10
38 * Memory to Internal Peripheral (M2P) channels (5 transmit + 5 receive).
39 *
40 * I2S contains 3 Tx and 3 Rx DMA Channels
41 * AAC contains 3 Tx and 3 Rx DMA Channels
42 * UART1 contains 1 Tx and 1 Rx DMA Channels
43 * UART2 contains 1 Tx and 1 Rx DMA Channels
44 * UART3 contains 1 Tx and 1 Rx DMA Channels
45 * IrDA contains 1 Tx and 1 Rx DMA Channels
46 *
47 * Registers are mapped statically in ep93xx_map_io().
48 */
49static struct ep93xx_dma_chan_data ep93xx_dma_m2p_channels[] = {
50 DMA_CHANNEL("m2p0", EP93XX_DMA_BASE + 0x0000, IRQ_EP93XX_DMAM2P0),
51 DMA_CHANNEL("m2p1", EP93XX_DMA_BASE + 0x0040, IRQ_EP93XX_DMAM2P1),
52 DMA_CHANNEL("m2p2", EP93XX_DMA_BASE + 0x0080, IRQ_EP93XX_DMAM2P2),
53 DMA_CHANNEL("m2p3", EP93XX_DMA_BASE + 0x00c0, IRQ_EP93XX_DMAM2P3),
54 DMA_CHANNEL("m2p4", EP93XX_DMA_BASE + 0x0240, IRQ_EP93XX_DMAM2P4),
55 DMA_CHANNEL("m2p5", EP93XX_DMA_BASE + 0x0200, IRQ_EP93XX_DMAM2P5),
56 DMA_CHANNEL("m2p6", EP93XX_DMA_BASE + 0x02c0, IRQ_EP93XX_DMAM2P6),
57 DMA_CHANNEL("m2p7", EP93XX_DMA_BASE + 0x0280, IRQ_EP93XX_DMAM2P7),
58 DMA_CHANNEL("m2p8", EP93XX_DMA_BASE + 0x0340, IRQ_EP93XX_DMAM2P8),
59 DMA_CHANNEL("m2p9", EP93XX_DMA_BASE + 0x0300, IRQ_EP93XX_DMAM2P9),
60};
61
62static struct ep93xx_dma_platform_data ep93xx_dma_m2p_data = {
63 .channels = ep93xx_dma_m2p_channels,
64 .num_channels = ARRAY_SIZE(ep93xx_dma_m2p_channels),
65};
66
67static struct platform_device ep93xx_dma_m2p_device = {
68 .name = "ep93xx-dma-m2p",
69 .id = -1,
70 .dev = {
71 .platform_data = &ep93xx_dma_m2p_data,
72 },
73};
74
75/*
76 * DMA M2M channels.
77 *
78 * There are 2 M2M channels which support memcpy/memset and in addition simple
79 * hardware requests from/to SSP and IDE. We do not implement an external
80 * hardware requests.
81 *
82 * Registers are mapped statically in ep93xx_map_io().
83 */
84static struct ep93xx_dma_chan_data ep93xx_dma_m2m_channels[] = {
85 DMA_CHANNEL("m2m0", EP93XX_DMA_BASE + 0x0100, IRQ_EP93XX_DMAM2M0),
86 DMA_CHANNEL("m2m1", EP93XX_DMA_BASE + 0x0140, IRQ_EP93XX_DMAM2M1),
87};
88
89static struct ep93xx_dma_platform_data ep93xx_dma_m2m_data = {
90 .channels = ep93xx_dma_m2m_channels,
91 .num_channels = ARRAY_SIZE(ep93xx_dma_m2m_channels),
92};
93
94static struct platform_device ep93xx_dma_m2m_device = {
95 .name = "ep93xx-dma-m2m",
96 .id = -1,
97 .dev = {
98 .platform_data = &ep93xx_dma_m2m_data,
99 },
100};
101
102static int __init ep93xx_dma_init(void)
103{
104 platform_device_register(&ep93xx_dma_m2p_device);
105 platform_device_register(&ep93xx_dma_m2m_device);
106 return 0;
107}
108arch_initcall(ep93xx_dma_init);
diff --git a/arch/arm/mach-ep93xx/gpio.c b/arch/arm/mach-ep93xx/gpio.c
deleted file mode 100644
index 415dce37b88c..000000000000
--- a/arch/arm/mach-ep93xx/gpio.c
+++ /dev/null
@@ -1,410 +0,0 @@
1/*
2 * linux/arch/arm/mach-ep93xx/gpio.c
3 *
4 * Generic EP93xx GPIO handling
5 *
6 * Copyright (c) 2008 Ryan Mallon <ryan@bluewatersys.com>
7 *
8 * Based on code originally from:
9 * linux/arch/arm/mach-ep93xx/core.c
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 */
15
16#define pr_fmt(fmt) "ep93xx " KBUILD_MODNAME ": " fmt
17
18#include <linux/init.h>
19#include <linux/module.h>
20#include <linux/seq_file.h>
21#include <linux/io.h>
22#include <linux/gpio.h>
23#include <linux/irq.h>
24
25#include <mach/hardware.h>
26
27/*************************************************************************
28 * Interrupt handling for EP93xx on-chip GPIOs
29 *************************************************************************/
30static unsigned char gpio_int_unmasked[3];
31static unsigned char gpio_int_enabled[3];
32static unsigned char gpio_int_type1[3];
33static unsigned char gpio_int_type2[3];
34static unsigned char gpio_int_debounce[3];
35
36/* Port ordering is: A B F */
37static const u8 int_type1_register_offset[3] = { 0x90, 0xac, 0x4c };
38static const u8 int_type2_register_offset[3] = { 0x94, 0xb0, 0x50 };
39static const u8 eoi_register_offset[3] = { 0x98, 0xb4, 0x54 };
40static const u8 int_en_register_offset[3] = { 0x9c, 0xb8, 0x58 };
41static const u8 int_debounce_register_offset[3] = { 0xa8, 0xc4, 0x64 };
42
43static void ep93xx_gpio_update_int_params(unsigned port)
44{
45 BUG_ON(port > 2);
46
47 __raw_writeb(0, EP93XX_GPIO_REG(int_en_register_offset[port]));
48
49 __raw_writeb(gpio_int_type2[port],
50 EP93XX_GPIO_REG(int_type2_register_offset[port]));
51
52 __raw_writeb(gpio_int_type1[port],
53 EP93XX_GPIO_REG(int_type1_register_offset[port]));
54
55 __raw_writeb(gpio_int_unmasked[port] & gpio_int_enabled[port],
56 EP93XX_GPIO_REG(int_en_register_offset[port]));
57}
58
59static inline void ep93xx_gpio_int_mask(unsigned line)
60{
61 gpio_int_unmasked[line >> 3] &= ~(1 << (line & 7));
62}
63
64static void ep93xx_gpio_int_debounce(unsigned int irq, bool enable)
65{
66 int line = irq_to_gpio(irq);
67 int port = line >> 3;
68 int port_mask = 1 << (line & 7);
69
70 if (enable)
71 gpio_int_debounce[port] |= port_mask;
72 else
73 gpio_int_debounce[port] &= ~port_mask;
74
75 __raw_writeb(gpio_int_debounce[port],
76 EP93XX_GPIO_REG(int_debounce_register_offset[port]));
77}
78
79static void ep93xx_gpio_ab_irq_handler(unsigned int irq, struct irq_desc *desc)
80{
81 unsigned char status;
82 int i;
83
84 status = __raw_readb(EP93XX_GPIO_A_INT_STATUS);
85 for (i = 0; i < 8; i++) {
86 if (status & (1 << i)) {
87 int gpio_irq = gpio_to_irq(EP93XX_GPIO_LINE_A(0)) + i;
88 generic_handle_irq(gpio_irq);
89 }
90 }
91
92 status = __raw_readb(EP93XX_GPIO_B_INT_STATUS);
93 for (i = 0; i < 8; i++) {
94 if (status & (1 << i)) {
95 int gpio_irq = gpio_to_irq(EP93XX_GPIO_LINE_B(0)) + i;
96 generic_handle_irq(gpio_irq);
97 }
98 }
99}
100
101static void ep93xx_gpio_f_irq_handler(unsigned int irq, struct irq_desc *desc)
102{
103 /*
104 * map discontiguous hw irq range to continuous sw irq range:
105 *
106 * IRQ_EP93XX_GPIO{0..7}MUX -> gpio_to_irq(EP93XX_GPIO_LINE_F({0..7})
107 */
108 int port_f_idx = ((irq + 1) & 7) ^ 4; /* {19..22,47..50} -> {0..7} */
109 int gpio_irq = gpio_to_irq(EP93XX_GPIO_LINE_F(0)) + port_f_idx;
110
111 generic_handle_irq(gpio_irq);
112}
113
114static void ep93xx_gpio_irq_ack(struct irq_data *d)
115{
116 int line = irq_to_gpio(d->irq);
117 int port = line >> 3;
118 int port_mask = 1 << (line & 7);
119
120 if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) {
121 gpio_int_type2[port] ^= port_mask; /* switch edge direction */
122 ep93xx_gpio_update_int_params(port);
123 }
124
125 __raw_writeb(port_mask, EP93XX_GPIO_REG(eoi_register_offset[port]));
126}
127
128static void ep93xx_gpio_irq_mask_ack(struct irq_data *d)
129{
130 int line = irq_to_gpio(d->irq);
131 int port = line >> 3;
132 int port_mask = 1 << (line & 7);
133
134 if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH)
135 gpio_int_type2[port] ^= port_mask; /* switch edge direction */
136
137 gpio_int_unmasked[port] &= ~port_mask;
138 ep93xx_gpio_update_int_params(port);
139
140 __raw_writeb(port_mask, EP93XX_GPIO_REG(eoi_register_offset[port]));
141}
142
143static void ep93xx_gpio_irq_mask(struct irq_data *d)
144{
145 int line = irq_to_gpio(d->irq);
146 int port = line >> 3;
147
148 gpio_int_unmasked[port] &= ~(1 << (line & 7));
149 ep93xx_gpio_update_int_params(port);
150}
151
152static void ep93xx_gpio_irq_unmask(struct irq_data *d)
153{
154 int line = irq_to_gpio(d->irq);
155 int port = line >> 3;
156
157 gpio_int_unmasked[port] |= 1 << (line & 7);
158 ep93xx_gpio_update_int_params(port);
159}
160
161/*
162 * gpio_int_type1 controls whether the interrupt is level (0) or
163 * edge (1) triggered, while gpio_int_type2 controls whether it
164 * triggers on low/falling (0) or high/rising (1).
165 */
166static int ep93xx_gpio_irq_type(struct irq_data *d, unsigned int type)
167{
168 const int gpio = irq_to_gpio(d->irq);
169 const int port = gpio >> 3;
170 const int port_mask = 1 << (gpio & 7);
171 irq_flow_handler_t handler;
172
173 gpio_direction_input(gpio);
174
175 switch (type) {
176 case IRQ_TYPE_EDGE_RISING:
177 gpio_int_type1[port] |= port_mask;
178 gpio_int_type2[port] |= port_mask;
179 handler = handle_edge_irq;
180 break;
181 case IRQ_TYPE_EDGE_FALLING:
182 gpio_int_type1[port] |= port_mask;
183 gpio_int_type2[port] &= ~port_mask;
184 handler = handle_edge_irq;
185 break;
186 case IRQ_TYPE_LEVEL_HIGH:
187 gpio_int_type1[port] &= ~port_mask;
188 gpio_int_type2[port] |= port_mask;
189 handler = handle_level_irq;
190 break;
191 case IRQ_TYPE_LEVEL_LOW:
192 gpio_int_type1[port] &= ~port_mask;
193 gpio_int_type2[port] &= ~port_mask;
194 handler = handle_level_irq;
195 break;
196 case IRQ_TYPE_EDGE_BOTH:
197 gpio_int_type1[port] |= port_mask;
198 /* set initial polarity based on current input level */
199 if (gpio_get_value(gpio))
200 gpio_int_type2[port] &= ~port_mask; /* falling */
201 else
202 gpio_int_type2[port] |= port_mask; /* rising */
203 handler = handle_edge_irq;
204 break;
205 default:
206 pr_err("failed to set irq type %d for gpio %d\n", type, gpio);
207 return -EINVAL;
208 }
209
210 __irq_set_handler_locked(d->irq, handler);
211
212 gpio_int_enabled[port] |= port_mask;
213
214 ep93xx_gpio_update_int_params(port);
215
216 return 0;
217}
218
219static struct irq_chip ep93xx_gpio_irq_chip = {
220 .name = "GPIO",
221 .irq_ack = ep93xx_gpio_irq_ack,
222 .irq_mask_ack = ep93xx_gpio_irq_mask_ack,
223 .irq_mask = ep93xx_gpio_irq_mask,
224 .irq_unmask = ep93xx_gpio_irq_unmask,
225 .irq_set_type = ep93xx_gpio_irq_type,
226};
227
228void __init ep93xx_gpio_init_irq(void)
229{
230 int gpio_irq;
231
232 for (gpio_irq = gpio_to_irq(0);
233 gpio_irq <= gpio_to_irq(EP93XX_GPIO_LINE_MAX_IRQ); ++gpio_irq) {
234 irq_set_chip_and_handler(gpio_irq, &ep93xx_gpio_irq_chip,
235 handle_level_irq);
236 set_irq_flags(gpio_irq, IRQF_VALID);
237 }
238
239 irq_set_chained_handler(IRQ_EP93XX_GPIO_AB,
240 ep93xx_gpio_ab_irq_handler);
241 irq_set_chained_handler(IRQ_EP93XX_GPIO0MUX,
242 ep93xx_gpio_f_irq_handler);
243 irq_set_chained_handler(IRQ_EP93XX_GPIO1MUX,
244 ep93xx_gpio_f_irq_handler);
245 irq_set_chained_handler(IRQ_EP93XX_GPIO2MUX,
246 ep93xx_gpio_f_irq_handler);
247 irq_set_chained_handler(IRQ_EP93XX_GPIO3MUX,
248 ep93xx_gpio_f_irq_handler);
249 irq_set_chained_handler(IRQ_EP93XX_GPIO4MUX,
250 ep93xx_gpio_f_irq_handler);
251 irq_set_chained_handler(IRQ_EP93XX_GPIO5MUX,
252 ep93xx_gpio_f_irq_handler);
253 irq_set_chained_handler(IRQ_EP93XX_GPIO6MUX,
254 ep93xx_gpio_f_irq_handler);
255 irq_set_chained_handler(IRQ_EP93XX_GPIO7MUX,
256 ep93xx_gpio_f_irq_handler);
257}
258
259
260/*************************************************************************
261 * gpiolib interface for EP93xx on-chip GPIOs
262 *************************************************************************/
263struct ep93xx_gpio_chip {
264 struct gpio_chip chip;
265
266 void __iomem *data_reg;
267 void __iomem *data_dir_reg;
268};
269
270#define to_ep93xx_gpio_chip(c) container_of(c, struct ep93xx_gpio_chip, chip)
271
272static int ep93xx_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
273{
274 struct ep93xx_gpio_chip *ep93xx_chip = to_ep93xx_gpio_chip(chip);
275 unsigned long flags;
276 u8 v;
277
278 local_irq_save(flags);
279 v = __raw_readb(ep93xx_chip->data_dir_reg);
280 v &= ~(1 << offset);
281 __raw_writeb(v, ep93xx_chip->data_dir_reg);
282 local_irq_restore(flags);
283
284 return 0;
285}
286
287static int ep93xx_gpio_direction_output(struct gpio_chip *chip,
288 unsigned offset, int val)
289{
290 struct ep93xx_gpio_chip *ep93xx_chip = to_ep93xx_gpio_chip(chip);
291 unsigned long flags;
292 int line;
293 u8 v;
294
295 local_irq_save(flags);
296
297 /* Set the value */
298 v = __raw_readb(ep93xx_chip->data_reg);
299 if (val)
300 v |= (1 << offset);
301 else
302 v &= ~(1 << offset);
303 __raw_writeb(v, ep93xx_chip->data_reg);
304
305 /* Drive as an output */
306 line = chip->base + offset;
307 if (line <= EP93XX_GPIO_LINE_MAX_IRQ) {
308 /* Ports A/B/F */
309 ep93xx_gpio_int_mask(line);
310 ep93xx_gpio_update_int_params(line >> 3);
311 }
312
313 v = __raw_readb(ep93xx_chip->data_dir_reg);
314 v |= (1 << offset);
315 __raw_writeb(v, ep93xx_chip->data_dir_reg);
316
317 local_irq_restore(flags);
318
319 return 0;
320}
321
322static int ep93xx_gpio_get(struct gpio_chip *chip, unsigned offset)
323{
324 struct ep93xx_gpio_chip *ep93xx_chip = to_ep93xx_gpio_chip(chip);
325
326 return !!(__raw_readb(ep93xx_chip->data_reg) & (1 << offset));
327}
328
329static void ep93xx_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
330{
331 struct ep93xx_gpio_chip *ep93xx_chip = to_ep93xx_gpio_chip(chip);
332 unsigned long flags;
333 u8 v;
334
335 local_irq_save(flags);
336 v = __raw_readb(ep93xx_chip->data_reg);
337 if (val)
338 v |= (1 << offset);
339 else
340 v &= ~(1 << offset);
341 __raw_writeb(v, ep93xx_chip->data_reg);
342 local_irq_restore(flags);
343}
344
345static int ep93xx_gpio_set_debounce(struct gpio_chip *chip,
346 unsigned offset, unsigned debounce)
347{
348 int gpio = chip->base + offset;
349 int irq = gpio_to_irq(gpio);
350
351 if (irq < 0)
352 return -EINVAL;
353
354 ep93xx_gpio_int_debounce(irq, debounce ? true : false);
355
356 return 0;
357}
358
359#define EP93XX_GPIO_BANK(name, dr, ddr, base_gpio) \
360 { \
361 .chip = { \
362 .label = name, \
363 .direction_input = ep93xx_gpio_direction_input, \
364 .direction_output = ep93xx_gpio_direction_output, \
365 .get = ep93xx_gpio_get, \
366 .set = ep93xx_gpio_set, \
367 .base = base_gpio, \
368 .ngpio = 8, \
369 }, \
370 .data_reg = EP93XX_GPIO_REG(dr), \
371 .data_dir_reg = EP93XX_GPIO_REG(ddr), \
372 }
373
374static struct ep93xx_gpio_chip ep93xx_gpio_banks[] = {
375 EP93XX_GPIO_BANK("A", 0x00, 0x10, 0),
376 EP93XX_GPIO_BANK("B", 0x04, 0x14, 8),
377 EP93XX_GPIO_BANK("C", 0x08, 0x18, 40),
378 EP93XX_GPIO_BANK("D", 0x0c, 0x1c, 24),
379 EP93XX_GPIO_BANK("E", 0x20, 0x24, 32),
380 EP93XX_GPIO_BANK("F", 0x30, 0x34, 16),
381 EP93XX_GPIO_BANK("G", 0x38, 0x3c, 48),
382 EP93XX_GPIO_BANK("H", 0x40, 0x44, 56),
383};
384
385void __init ep93xx_gpio_init(void)
386{
387 int i;
388
389 /* Set Ports C, D, E, G, and H for GPIO use */
390 ep93xx_devcfg_set_bits(EP93XX_SYSCON_DEVCFG_KEYS |
391 EP93XX_SYSCON_DEVCFG_GONK |
392 EP93XX_SYSCON_DEVCFG_EONIDE |
393 EP93XX_SYSCON_DEVCFG_GONIDE |
394 EP93XX_SYSCON_DEVCFG_HONIDE);
395
396 for (i = 0; i < ARRAY_SIZE(ep93xx_gpio_banks); i++) {
397 struct gpio_chip *chip = &ep93xx_gpio_banks[i].chip;
398
399 /*
400 * Ports A, B, and F support input debouncing when
401 * used as interrupts.
402 */
403 if (!strcmp(chip->label, "A") ||
404 !strcmp(chip->label, "B") ||
405 !strcmp(chip->label, "F"))
406 chip->set_debounce = ep93xx_gpio_set_debounce;
407
408 gpiochip_add(chip);
409 }
410}
diff --git a/arch/arm/mach-ep93xx/include/mach/dma.h b/arch/arm/mach-ep93xx/include/mach/dma.h
index 5e31b2b25da9..46d4d876e6fb 100644
--- a/arch/arm/mach-ep93xx/include/mach/dma.h
+++ b/arch/arm/mach-ep93xx/include/mach/dma.h
@@ -1,149 +1,93 @@
1/**
2 * DOC: EP93xx DMA M2P memory to peripheral and peripheral to memory engine
3 *
4 * The EP93xx DMA M2P subsystem handles DMA transfers between memory and
5 * peripherals. DMA M2P channels are available for audio, UARTs and IrDA.
6 * See chapter 10 of the EP93xx users guide for full details on the DMA M2P
7 * engine.
8 *
9 * See sound/soc/ep93xx/ep93xx-pcm.c for an example use of the DMA M2P code.
10 *
11 */
12
13#ifndef __ASM_ARCH_DMA_H 1#ifndef __ASM_ARCH_DMA_H
14#define __ASM_ARCH_DMA_H 2#define __ASM_ARCH_DMA_H
15 3
16#include <linux/list.h>
17#include <linux/types.h> 4#include <linux/types.h>
5#include <linux/dmaengine.h>
6#include <linux/dma-mapping.h>
18 7
19/** 8/*
20 * struct ep93xx_dma_buffer - Information about a buffer to be transferred 9 * M2P channels.
21 * using the DMA M2P engine
22 * 10 *
23 * @list: Entry in DMA buffer list 11 * Note that these values are also directly used for setting the PPALLOC
24 * @bus_addr: Physical address of the buffer 12 * register.
25 * @size: Size of the buffer in bytes
26 */ 13 */
27struct ep93xx_dma_buffer { 14#define EP93XX_DMA_I2S1 0
28 struct list_head list; 15#define EP93XX_DMA_I2S2 1
29 u32 bus_addr; 16#define EP93XX_DMA_AAC1 2
30 u16 size; 17#define EP93XX_DMA_AAC2 3
31}; 18#define EP93XX_DMA_AAC3 4
19#define EP93XX_DMA_I2S3 5
20#define EP93XX_DMA_UART1 6
21#define EP93XX_DMA_UART2 7
22#define EP93XX_DMA_UART3 8
23#define EP93XX_DMA_IRDA 9
24/* M2M channels */
25#define EP93XX_DMA_SSP 10
26#define EP93XX_DMA_IDE 11
32 27
33/** 28/**
34 * struct ep93xx_dma_m2p_client - Information about a DMA M2P client 29 * struct ep93xx_dma_data - configuration data for the EP93xx dmaengine
35 * 30 * @port: peripheral which is requesting the channel
36 * @name: Unique name for this client 31 * @direction: TX/RX channel
37 * @flags: Client flags 32 * @name: optional name for the channel, this is displayed in /proc/interrupts
38 * @cookie: User data to pass to callback functions 33 *
39 * @buffer_started: Non NULL function to call when a transfer is started. 34 * This information is passed as private channel parameter in a filter
40 * The arguments are the user data cookie and the DMA 35 * function. Note that this is only needed for slave/cyclic channels. For
41 * buffer which is starting. 36 * memcpy channels %NULL data should be passed.
42 * @buffer_finished: Non NULL function to call when a transfer is completed.
43 * The arguments are the user data cookie, the DMA buffer
44 * which has completed, and a boolean flag indicating if
45 * the transfer had an error.
46 */ 37 */
47struct ep93xx_dma_m2p_client { 38struct ep93xx_dma_data {
48 char *name; 39 int port;
49 u8 flags; 40 enum dma_data_direction direction;
50 void *cookie; 41 const char *name;
51 void (*buffer_started)(void *cookie,
52 struct ep93xx_dma_buffer *buf);
53 void (*buffer_finished)(void *cookie,
54 struct ep93xx_dma_buffer *buf,
55 int bytes, int error);
56
57 /* private: Internal use only */
58 void *channel;
59}; 42};
60 43
61/* DMA M2P ports */
62#define EP93XX_DMA_M2P_PORT_I2S1 0x00
63#define EP93XX_DMA_M2P_PORT_I2S2 0x01
64#define EP93XX_DMA_M2P_PORT_AAC1 0x02
65#define EP93XX_DMA_M2P_PORT_AAC2 0x03
66#define EP93XX_DMA_M2P_PORT_AAC3 0x04
67#define EP93XX_DMA_M2P_PORT_I2S3 0x05
68#define EP93XX_DMA_M2P_PORT_UART1 0x06
69#define EP93XX_DMA_M2P_PORT_UART2 0x07
70#define EP93XX_DMA_M2P_PORT_UART3 0x08
71#define EP93XX_DMA_M2P_PORT_IRDA 0x09
72#define EP93XX_DMA_M2P_PORT_MASK 0x0f
73
74/* DMA M2P client flags */
75#define EP93XX_DMA_M2P_TX 0x00 /* Memory to peripheral */
76#define EP93XX_DMA_M2P_RX 0x10 /* Peripheral to memory */
77
78/*
79 * DMA M2P client error handling flags. See the EP93xx users guide
80 * documentation on the DMA M2P CONTROL register for more details
81 */
82#define EP93XX_DMA_M2P_ABORT_ON_ERROR 0x20 /* Abort on peripheral error */
83#define EP93XX_DMA_M2P_IGNORE_ERROR 0x40 /* Ignore peripheral errors */
84#define EP93XX_DMA_M2P_ERROR_MASK 0x60 /* Mask of error bits */
85
86/** 44/**
87 * ep93xx_dma_m2p_client_register - Register a client with the DMA M2P 45 * struct ep93xx_dma_chan_data - platform specific data for a DMA channel
88 * subsystem 46 * @name: name of the channel, used for getting the right clock for the channel
89 * 47 * @base: mapped registers
90 * @m2p: Client information to register 48 * @irq: interrupt number used by this channel
91 * returns 0 on success
92 *
93 * The DMA M2P subsystem allocates a channel and an interrupt line for the DMA
94 * client
95 */ 49 */
96int ep93xx_dma_m2p_client_register(struct ep93xx_dma_m2p_client *m2p); 50struct ep93xx_dma_chan_data {
51 const char *name;
52 void __iomem *base;
53 int irq;
54};
97 55
98/** 56/**
99 * ep93xx_dma_m2p_client_unregister - Unregister a client from the DMA M2P 57 * struct ep93xx_dma_platform_data - platform data for the dmaengine driver
100 * subsystem 58 * @channels: array of channels which are passed to the driver
101 * 59 * @num_channels: number of channels in the array
102 * @m2p: Client to unregister
103 * 60 *
104 * Any transfers currently in progress will be completed in hardware, but 61 * This structure is passed to the DMA engine driver via platform data. For
105 * ignored in software. 62 * M2P channels, contract is that even channels are for TX and odd for RX.
63 * There is no requirement for the M2M channels.
106 */ 64 */
107void ep93xx_dma_m2p_client_unregister(struct ep93xx_dma_m2p_client *m2p); 65struct ep93xx_dma_platform_data {
66 struct ep93xx_dma_chan_data *channels;
67 size_t num_channels;
68};
108 69
109/** 70static inline bool ep93xx_dma_chan_is_m2p(struct dma_chan *chan)
110 * ep93xx_dma_m2p_submit - Submit a DMA M2P transfer 71{
111 * 72 return !strcmp(dev_name(chan->device->dev), "ep93xx-dma-m2p");
112 * @m2p: DMA Client to submit the transfer on 73}
113 * @buf: DMA Buffer to submit
114 *
115 * If the current or next transfer positions are free on the M2P client then
116 * the transfer is started immediately. If not, the transfer is added to the
117 * list of pending transfers. This function must not be called from the
118 * buffer_finished callback for an M2P channel.
119 *
120 */
121void ep93xx_dma_m2p_submit(struct ep93xx_dma_m2p_client *m2p,
122 struct ep93xx_dma_buffer *buf);
123 74
124/** 75/**
125 * ep93xx_dma_m2p_submit_recursive - Put a DMA transfer on the pending list 76 * ep93xx_dma_chan_direction - returns direction the channel can be used
126 * for an M2P channel 77 * @chan: channel
127 * 78 *
128 * @m2p: DMA Client to submit the transfer on 79 * This function can be used in filter functions to find out whether the
129 * @buf: DMA Buffer to submit 80 * channel supports given DMA direction. Only M2P channels have such
130 * 81 * limitation, for M2M channels the direction is configurable.
131 * This function must only be called from the buffer_finished callback for an
132 * M2P channel. It is commonly used to add the next transfer in a chained list
133 * of DMA transfers.
134 */ 82 */
135void ep93xx_dma_m2p_submit_recursive(struct ep93xx_dma_m2p_client *m2p, 83static inline enum dma_data_direction
136 struct ep93xx_dma_buffer *buf); 84ep93xx_dma_chan_direction(struct dma_chan *chan)
85{
86 if (!ep93xx_dma_chan_is_m2p(chan))
87 return DMA_NONE;
137 88
138/** 89 /* even channels are for TX, odd for RX */
139 * ep93xx_dma_m2p_flush - Flush all pending transfers on a DMA M2P client 90 return (chan->chan_id % 2 == 0) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
140 * 91}
141 * @m2p: DMA client to flush transfers on
142 *
143 * Any transfers currently in progress will be completed in hardware, but
144 * ignored in software.
145 *
146 */
147void ep93xx_dma_m2p_flush(struct ep93xx_dma_m2p_client *m2p);
148 92
149#endif /* __ASM_ARCH_DMA_H */ 93#endif /* __ASM_ARCH_DMA_H */
diff --git a/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h b/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h
index 9ac4d1055097..c4a7b84ef06d 100644
--- a/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h
+++ b/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h
@@ -98,6 +98,7 @@
98 98
99#define EP93XX_SECURITY_BASE EP93XX_APB_IOMEM(0x00030000) 99#define EP93XX_SECURITY_BASE EP93XX_APB_IOMEM(0x00030000)
100 100
101#define EP93XX_GPIO_PHYS_BASE EP93XX_APB_PHYS(0x00040000)
101#define EP93XX_GPIO_BASE EP93XX_APB_IOMEM(0x00040000) 102#define EP93XX_GPIO_BASE EP93XX_APB_IOMEM(0x00040000)
102#define EP93XX_GPIO_REG(x) (EP93XX_GPIO_BASE + (x)) 103#define EP93XX_GPIO_REG(x) (EP93XX_GPIO_BASE + (x))
103#define EP93XX_GPIO_F_INT_STATUS EP93XX_GPIO_REG(0x5c) 104#define EP93XX_GPIO_F_INT_STATUS EP93XX_GPIO_REG(0x5c)
diff --git a/arch/arm/mach-ep93xx/include/mach/ep93xx_spi.h b/arch/arm/mach-ep93xx/include/mach/ep93xx_spi.h
index 0a37961b3453..9bb63ac13f04 100644
--- a/arch/arm/mach-ep93xx/include/mach/ep93xx_spi.h
+++ b/arch/arm/mach-ep93xx/include/mach/ep93xx_spi.h
@@ -7,9 +7,11 @@ struct spi_device;
7 * struct ep93xx_spi_info - EP93xx specific SPI descriptor 7 * struct ep93xx_spi_info - EP93xx specific SPI descriptor
8 * @num_chipselect: number of chip selects on this board, must be 8 * @num_chipselect: number of chip selects on this board, must be
9 * at least one 9 * at least one
10 * @use_dma: use DMA for the transfers
10 */ 11 */
11struct ep93xx_spi_info { 12struct ep93xx_spi_info {
12 int num_chipselect; 13 int num_chipselect;
14 bool use_dma;
13}; 15};
14 16
15/** 17/**
diff --git a/arch/arm/mach-exynos4/cpu.c b/arch/arm/mach-exynos4/cpu.c
index 9babe4473e88..bfd621460abf 100644
--- a/arch/arm/mach-exynos4/cpu.c
+++ b/arch/arm/mach-exynos4/cpu.c
@@ -23,6 +23,7 @@
23#include <plat/sdhci.h> 23#include <plat/sdhci.h>
24#include <plat/devs.h> 24#include <plat/devs.h>
25#include <plat/fimc-core.h> 25#include <plat/fimc-core.h>
26#include <plat/iic-core.h>
26 27
27#include <mach/regs-irq.h> 28#include <mach/regs-irq.h>
28 29
@@ -132,6 +133,11 @@ void __init exynos4_map_io(void)
132 s3c_fimc_setname(1, "exynos4-fimc"); 133 s3c_fimc_setname(1, "exynos4-fimc");
133 s3c_fimc_setname(2, "exynos4-fimc"); 134 s3c_fimc_setname(2, "exynos4-fimc");
134 s3c_fimc_setname(3, "exynos4-fimc"); 135 s3c_fimc_setname(3, "exynos4-fimc");
136
137 /* The I2C bus controllers are directly compatible with s3c2440 */
138 s3c_i2c0_setname("s3c2440-i2c");
139 s3c_i2c1_setname("s3c2440-i2c");
140 s3c_i2c2_setname("s3c2440-i2c");
135} 141}
136 142
137void __init exynos4_init_clocks(int xtal) 143void __init exynos4_init_clocks(int xtal)
diff --git a/arch/arm/mach-exynos4/dev-audio.c b/arch/arm/mach-exynos4/dev-audio.c
index 1eed5f9f7bd3..983069a53239 100644
--- a/arch/arm/mach-exynos4/dev-audio.c
+++ b/arch/arm/mach-exynos4/dev-audio.c
@@ -330,7 +330,7 @@ struct platform_device exynos4_device_ac97 = {
330 330
331static int exynos4_spdif_cfg_gpio(struct platform_device *pdev) 331static int exynos4_spdif_cfg_gpio(struct platform_device *pdev)
332{ 332{
333 s3c_gpio_cfgpin_range(EXYNOS4_GPC1(0), 2, S3C_GPIO_SFN(3)); 333 s3c_gpio_cfgpin_range(EXYNOS4_GPC1(0), 2, S3C_GPIO_SFN(4));
334 334
335 return 0; 335 return 0;
336} 336}
diff --git a/arch/arm/mach-exynos4/headsmp.S b/arch/arm/mach-exynos4/headsmp.S
index 6c6cfc50c46b..3cdeb3647542 100644
--- a/arch/arm/mach-exynos4/headsmp.S
+++ b/arch/arm/mach-exynos4/headsmp.S
@@ -13,7 +13,7 @@
13#include <linux/linkage.h> 13#include <linux/linkage.h>
14#include <linux/init.h> 14#include <linux/init.h>
15 15
16 __INIT 16 __CPUINIT
17 17
18/* 18/*
19 * exynos4 specific entry point for secondary CPUs. This provides 19 * exynos4 specific entry point for secondary CPUs. This provides
diff --git a/arch/arm/mach-exynos4/mach-smdkv310.c b/arch/arm/mach-exynos4/mach-smdkv310.c
index 152676471b67..edd814110da8 100644
--- a/arch/arm/mach-exynos4/mach-smdkv310.c
+++ b/arch/arm/mach-exynos4/mach-smdkv310.c
@@ -78,9 +78,7 @@ static struct s3c2410_uartcfg smdkv310_uartcfgs[] __initdata = {
78}; 78};
79 79
80static struct s3c_sdhci_platdata smdkv310_hsmmc0_pdata __initdata = { 80static struct s3c_sdhci_platdata smdkv310_hsmmc0_pdata __initdata = {
81 .cd_type = S3C_SDHCI_CD_GPIO, 81 .cd_type = S3C_SDHCI_CD_INTERNAL,
82 .ext_cd_gpio = EXYNOS4_GPK0(2),
83 .ext_cd_gpio_invert = 1,
84 .clk_type = S3C_SDHCI_CLK_DIV_EXTERNAL, 82 .clk_type = S3C_SDHCI_CLK_DIV_EXTERNAL,
85#ifdef CONFIG_EXYNOS4_SDHCI_CH0_8BIT 83#ifdef CONFIG_EXYNOS4_SDHCI_CH0_8BIT
86 .max_width = 8, 84 .max_width = 8,
@@ -96,9 +94,7 @@ static struct s3c_sdhci_platdata smdkv310_hsmmc1_pdata __initdata = {
96}; 94};
97 95
98static struct s3c_sdhci_platdata smdkv310_hsmmc2_pdata __initdata = { 96static struct s3c_sdhci_platdata smdkv310_hsmmc2_pdata __initdata = {
99 .cd_type = S3C_SDHCI_CD_GPIO, 97 .cd_type = S3C_SDHCI_CD_INTERNAL,
100 .ext_cd_gpio = EXYNOS4_GPK2(2),
101 .ext_cd_gpio_invert = 1,
102 .clk_type = S3C_SDHCI_CLK_DIV_EXTERNAL, 98 .clk_type = S3C_SDHCI_CLK_DIV_EXTERNAL,
103#ifdef CONFIG_EXYNOS4_SDHCI_CH2_8BIT 99#ifdef CONFIG_EXYNOS4_SDHCI_CH2_8BIT
104 .max_width = 8, 100 .max_width = 8,
diff --git a/arch/arm/mach-footbridge/Kconfig b/arch/arm/mach-footbridge/Kconfig
index 46adca068f2c..dc26fff22cf0 100644
--- a/arch/arm/mach-footbridge/Kconfig
+++ b/arch/arm/mach-footbridge/Kconfig
@@ -5,6 +5,7 @@ menu "Footbridge Implementations"
5config ARCH_CATS 5config ARCH_CATS
6 bool "CATS" 6 bool "CATS"
7 select CLKSRC_I8253 7 select CLKSRC_I8253
8 select CLKEVT_I8253
8 select FOOTBRIDGE_HOST 9 select FOOTBRIDGE_HOST
9 select ISA 10 select ISA
10 select ISA_DMA 11 select ISA_DMA
diff --git a/arch/arm/mach-footbridge/isa-timer.c b/arch/arm/mach-footbridge/isa-timer.c
index 7020f1a3feca..c40bb415f4b5 100644
--- a/arch/arm/mach-footbridge/isa-timer.c
+++ b/arch/arm/mach-footbridge/isa-timer.c
@@ -5,64 +5,18 @@
5 * Copyright (C) 1998 Phil Blundell 5 * Copyright (C) 1998 Phil Blundell
6 */ 6 */
7#include <linux/clockchips.h> 7#include <linux/clockchips.h>
8#include <linux/clocksource.h> 8#include <linux/i8253.h>
9#include <linux/init.h> 9#include <linux/init.h>
10#include <linux/interrupt.h> 10#include <linux/interrupt.h>
11#include <linux/irq.h> 11#include <linux/irq.h>
12#include <linux/io.h>
13#include <linux/spinlock.h> 12#include <linux/spinlock.h>
14#include <linux/timex.h> 13#include <linux/timex.h>
15 14
16#include <asm/irq.h> 15#include <asm/irq.h>
17#include <asm/i8253.h>
18#include <asm/mach/time.h> 16#include <asm/mach/time.h>
19 17
20#include "common.h" 18#include "common.h"
21 19
22DEFINE_RAW_SPINLOCK(i8253_lock);
23
24static void pit_set_mode(enum clock_event_mode mode,
25 struct clock_event_device *evt)
26{
27 unsigned long flags;
28
29 raw_local_irq_save(flags);
30
31 switch (mode) {
32 case CLOCK_EVT_MODE_PERIODIC:
33 outb_p(0x34, PIT_MODE);
34 outb_p(PIT_LATCH & 0xff, PIT_CH0);
35 outb_p(PIT_LATCH >> 8, PIT_CH0);
36 break;
37
38 case CLOCK_EVT_MODE_SHUTDOWN:
39 case CLOCK_EVT_MODE_UNUSED:
40 outb_p(0x30, PIT_MODE);
41 outb_p(0, PIT_CH0);
42 outb_p(0, PIT_CH0);
43 break;
44
45 case CLOCK_EVT_MODE_ONESHOT:
46 case CLOCK_EVT_MODE_RESUME:
47 break;
48 }
49 local_irq_restore(flags);
50}
51
52static int pit_set_next_event(unsigned long delta,
53 struct clock_event_device *evt)
54{
55 return 0;
56}
57
58static struct clock_event_device pit_ce = {
59 .name = "pit",
60 .features = CLOCK_EVT_FEAT_PERIODIC,
61 .set_mode = pit_set_mode,
62 .set_next_event = pit_set_next_event,
63 .shift = 32,
64};
65
66static irqreturn_t pit_timer_interrupt(int irq, void *dev_id) 20static irqreturn_t pit_timer_interrupt(int irq, void *dev_id)
67{ 21{
68 struct clock_event_device *ce = dev_id; 22 struct clock_event_device *ce = dev_id;
@@ -74,20 +28,15 @@ static struct irqaction pit_timer_irq = {
74 .name = "pit", 28 .name = "pit",
75 .handler = pit_timer_interrupt, 29 .handler = pit_timer_interrupt,
76 .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, 30 .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
77 .dev_id = &pit_ce, 31 .dev_id = &i8253_clockevent,
78}; 32};
79 33
80static void __init isa_timer_init(void) 34static void __init isa_timer_init(void)
81{ 35{
82 pit_ce.cpumask = cpumask_of(smp_processor_id());
83 pit_ce.mult = div_sc(PIT_TICK_RATE, NSEC_PER_SEC, pit_ce.shift);
84 pit_ce.max_delta_ns = clockevent_delta2ns(0x7fff, &pit_ce);
85 pit_ce.min_delta_ns = clockevent_delta2ns(0x000f, &pit_ce);
86
87 clocksource_i8253_init(); 36 clocksource_i8253_init();
88 37
89 setup_irq(pit_ce.irq, &pit_timer_irq); 38 setup_irq(i8253_clockevent.irq, &pit_timer_irq);
90 clockevents_register_device(&pit_ce); 39 clockevent_i8253_init(false);
91} 40}
92 41
93struct sys_timer isa_timer = { 42struct sys_timer isa_timer = {
diff --git a/arch/arm/mach-imx/clock-imx25.c b/arch/arm/mach-imx/clock-imx25.c
index a65838fc061c..af1c580b06bc 100644
--- a/arch/arm/mach-imx/clock-imx25.c
+++ b/arch/arm/mach-imx/clock-imx25.c
@@ -282,9 +282,10 @@ static struct clk_lookup lookups[] = {
282 _REGISTER_CLOCK("mxc-ehci.2", "usb", usbotg_clk) 282 _REGISTER_CLOCK("mxc-ehci.2", "usb", usbotg_clk)
283 _REGISTER_CLOCK("fsl-usb2-udc", "usb", usbotg_clk) 283 _REGISTER_CLOCK("fsl-usb2-udc", "usb", usbotg_clk)
284 _REGISTER_CLOCK("mxc_nand.0", NULL, nfc_clk) 284 _REGISTER_CLOCK("mxc_nand.0", NULL, nfc_clk)
285 _REGISTER_CLOCK("imx25-cspi.0", NULL, cspi1_clk) 285 /* i.mx25 has the i.mx35 type cspi */
286 _REGISTER_CLOCK("imx25-cspi.1", NULL, cspi2_clk) 286 _REGISTER_CLOCK("imx35-cspi.0", NULL, cspi1_clk)
287 _REGISTER_CLOCK("imx25-cspi.2", NULL, cspi3_clk) 287 _REGISTER_CLOCK("imx35-cspi.1", NULL, cspi2_clk)
288 _REGISTER_CLOCK("imx35-cspi.2", NULL, cspi3_clk)
288 _REGISTER_CLOCK("mxc_pwm.0", NULL, pwm1_clk) 289 _REGISTER_CLOCK("mxc_pwm.0", NULL, pwm1_clk)
289 _REGISTER_CLOCK("mxc_pwm.1", NULL, pwm2_clk) 290 _REGISTER_CLOCK("mxc_pwm.1", NULL, pwm2_clk)
290 _REGISTER_CLOCK("mxc_pwm.2", NULL, pwm3_clk) 291 _REGISTER_CLOCK("mxc_pwm.2", NULL, pwm3_clk)
diff --git a/arch/arm/mach-imx/dma-v1.c b/arch/arm/mach-imx/dma-v1.c
index 236f1495efad..f8aa5be0eb15 100644
--- a/arch/arm/mach-imx/dma-v1.c
+++ b/arch/arm/mach-imx/dma-v1.c
@@ -26,6 +26,7 @@
26#include <linux/init.h> 26#include <linux/init.h>
27#include <linux/kernel.h> 27#include <linux/kernel.h>
28#include <linux/interrupt.h> 28#include <linux/interrupt.h>
29#include <linux/err.h>
29#include <linux/errno.h> 30#include <linux/errno.h>
30#include <linux/clk.h> 31#include <linux/clk.h>
31#include <linux/scatterlist.h> 32#include <linux/scatterlist.h>
diff --git a/arch/arm/mach-imx/mach-apf9328.c b/arch/arm/mach-imx/mach-apf9328.c
index 15e45c84e371..59d2a3b137d9 100644
--- a/arch/arm/mach-imx/mach-apf9328.c
+++ b/arch/arm/mach-imx/mach-apf9328.c
@@ -115,6 +115,8 @@ static struct platform_device *devices[] __initdata = {
115 115
116static void __init apf9328_init(void) 116static void __init apf9328_init(void)
117{ 117{
118 imx1_soc_init();
119
118 mxc_gpio_setup_multiple_pins(apf9328_pins, 120 mxc_gpio_setup_multiple_pins(apf9328_pins,
119 ARRAY_SIZE(apf9328_pins), 121 ARRAY_SIZE(apf9328_pins),
120 "APF9328"); 122 "APF9328");
diff --git a/arch/arm/mach-imx/mach-armadillo5x0.c b/arch/arm/mach-imx/mach-armadillo5x0.c
index ffb40ff619b1..ede2710f8b76 100644
--- a/arch/arm/mach-imx/mach-armadillo5x0.c
+++ b/arch/arm/mach-imx/mach-armadillo5x0.c
@@ -490,6 +490,8 @@ static struct platform_device *devices[] __initdata = {
490 */ 490 */
491static void __init armadillo5x0_init(void) 491static void __init armadillo5x0_init(void)
492{ 492{
493 imx31_soc_init();
494
493 mxc_iomux_setup_multiple_pins(armadillo5x0_pins, 495 mxc_iomux_setup_multiple_pins(armadillo5x0_pins,
494 ARRAY_SIZE(armadillo5x0_pins), "armadillo5x0"); 496 ARRAY_SIZE(armadillo5x0_pins), "armadillo5x0");
495 497
diff --git a/arch/arm/mach-imx/mach-bug.c b/arch/arm/mach-imx/mach-bug.c
index 42e4f078a19c..f49470553bdf 100644
--- a/arch/arm/mach-imx/mach-bug.c
+++ b/arch/arm/mach-imx/mach-bug.c
@@ -42,6 +42,8 @@ static const unsigned int bug_pins[] __initconst = {
42 42
43static void __init bug_board_init(void) 43static void __init bug_board_init(void)
44{ 44{
45 imx31_soc_init();
46
45 mxc_iomux_setup_multiple_pins(bug_pins, 47 mxc_iomux_setup_multiple_pins(bug_pins,
46 ARRAY_SIZE(bug_pins), "uart-4"); 48 ARRAY_SIZE(bug_pins), "uart-4");
47 imx31_add_imx_uart4(&uart_pdata); 49 imx31_add_imx_uart4(&uart_pdata);
diff --git a/arch/arm/mach-imx/mach-cpuimx27.c b/arch/arm/mach-imx/mach-cpuimx27.c
index 46a2e41d43d2..87887ac5806b 100644
--- a/arch/arm/mach-imx/mach-cpuimx27.c
+++ b/arch/arm/mach-imx/mach-cpuimx27.c
@@ -250,6 +250,8 @@ __setup("otg_mode=", eukrea_cpuimx27_otg_mode);
250 250
251static void __init eukrea_cpuimx27_init(void) 251static void __init eukrea_cpuimx27_init(void)
252{ 252{
253 imx27_soc_init();
254
253 mxc_gpio_setup_multiple_pins(eukrea_cpuimx27_pins, 255 mxc_gpio_setup_multiple_pins(eukrea_cpuimx27_pins,
254 ARRAY_SIZE(eukrea_cpuimx27_pins), "CPUIMX27"); 256 ARRAY_SIZE(eukrea_cpuimx27_pins), "CPUIMX27");
255 257
diff --git a/arch/arm/mach-imx/mach-cpuimx35.c b/arch/arm/mach-imx/mach-cpuimx35.c
index 3f8ef825fa6f..f39a478ba1a6 100644
--- a/arch/arm/mach-imx/mach-cpuimx35.c
+++ b/arch/arm/mach-imx/mach-cpuimx35.c
@@ -156,6 +156,8 @@ __setup("otg_mode=", eukrea_cpuimx35_otg_mode);
156 */ 156 */
157static void __init eukrea_cpuimx35_init(void) 157static void __init eukrea_cpuimx35_init(void)
158{ 158{
159 imx35_soc_init();
160
159 mxc_iomux_v3_setup_multiple_pads(eukrea_cpuimx35_pads, 161 mxc_iomux_v3_setup_multiple_pads(eukrea_cpuimx35_pads,
160 ARRAY_SIZE(eukrea_cpuimx35_pads)); 162 ARRAY_SIZE(eukrea_cpuimx35_pads));
161 163
diff --git a/arch/arm/mach-imx/mach-eukrea_cpuimx25.c b/arch/arm/mach-imx/mach-eukrea_cpuimx25.c
index 148cff2819b9..da36da52969d 100644
--- a/arch/arm/mach-imx/mach-eukrea_cpuimx25.c
+++ b/arch/arm/mach-imx/mach-eukrea_cpuimx25.c
@@ -125,6 +125,8 @@ __setup("otg_mode=", eukrea_cpuimx25_otg_mode);
125 125
126static void __init eukrea_cpuimx25_init(void) 126static void __init eukrea_cpuimx25_init(void)
127{ 127{
128 imx25_soc_init();
129
128 if (mxc_iomux_v3_setup_multiple_pads(eukrea_cpuimx25_pads, 130 if (mxc_iomux_v3_setup_multiple_pads(eukrea_cpuimx25_pads,
129 ARRAY_SIZE(eukrea_cpuimx25_pads))) 131 ARRAY_SIZE(eukrea_cpuimx25_pads)))
130 printk(KERN_ERR "error setting cpuimx25 pads !\n"); 132 printk(KERN_ERR "error setting cpuimx25 pads !\n");
diff --git a/arch/arm/mach-imx/mach-imx27_visstrim_m10.c b/arch/arm/mach-imx/mach-imx27_visstrim_m10.c
index 7ae43b1ec517..c6269d60ddbc 100644
--- a/arch/arm/mach-imx/mach-imx27_visstrim_m10.c
+++ b/arch/arm/mach-imx/mach-imx27_visstrim_m10.c
@@ -231,6 +231,8 @@ static void __init visstrim_m10_board_init(void)
231{ 231{
232 int ret; 232 int ret;
233 233
234 imx27_soc_init();
235
234 ret = mxc_gpio_setup_multiple_pins(visstrim_m10_pins, 236 ret = mxc_gpio_setup_multiple_pins(visstrim_m10_pins,
235 ARRAY_SIZE(visstrim_m10_pins), "VISSTRIM_M10"); 237 ARRAY_SIZE(visstrim_m10_pins), "VISSTRIM_M10");
236 if (ret) 238 if (ret)
diff --git a/arch/arm/mach-imx/mach-imx27ipcam.c b/arch/arm/mach-imx/mach-imx27ipcam.c
index 9be6cd6fbf8c..272f793e9247 100644
--- a/arch/arm/mach-imx/mach-imx27ipcam.c
+++ b/arch/arm/mach-imx/mach-imx27ipcam.c
@@ -50,6 +50,8 @@ static const int mx27ipcam_pins[] __initconst = {
50 50
51static void __init mx27ipcam_init(void) 51static void __init mx27ipcam_init(void)
52{ 52{
53 imx27_soc_init();
54
53 mxc_gpio_setup_multiple_pins(mx27ipcam_pins, ARRAY_SIZE(mx27ipcam_pins), 55 mxc_gpio_setup_multiple_pins(mx27ipcam_pins, ARRAY_SIZE(mx27ipcam_pins),
54 "mx27ipcam"); 56 "mx27ipcam");
55 57
diff --git a/arch/arm/mach-imx/mach-imx27lite.c b/arch/arm/mach-imx/mach-imx27lite.c
index 841140516ede..d81a769fe895 100644
--- a/arch/arm/mach-imx/mach-imx27lite.c
+++ b/arch/arm/mach-imx/mach-imx27lite.c
@@ -59,6 +59,8 @@ static const struct imxuart_platform_data uart_pdata __initconst = {
59 59
60static void __init mx27lite_init(void) 60static void __init mx27lite_init(void)
61{ 61{
62 imx27_soc_init();
63
62 mxc_gpio_setup_multiple_pins(mx27lite_pins, ARRAY_SIZE(mx27lite_pins), 64 mxc_gpio_setup_multiple_pins(mx27lite_pins, ARRAY_SIZE(mx27lite_pins),
63 "imx27lite"); 65 "imx27lite");
64 imx27_add_imx_uart0(&uart_pdata); 66 imx27_add_imx_uart0(&uart_pdata);
diff --git a/arch/arm/mach-imx/mach-kzm_arm11_01.c b/arch/arm/mach-imx/mach-kzm_arm11_01.c
index 1ecae20cf4e3..e472a1d88058 100644
--- a/arch/arm/mach-imx/mach-kzm_arm11_01.c
+++ b/arch/arm/mach-imx/mach-kzm_arm11_01.c
@@ -223,6 +223,8 @@ static int kzm_pins[] __initdata = {
223 */ 223 */
224static void __init kzm_board_init(void) 224static void __init kzm_board_init(void)
225{ 225{
226 imx31_soc_init();
227
226 mxc_iomux_setup_multiple_pins(kzm_pins, 228 mxc_iomux_setup_multiple_pins(kzm_pins,
227 ARRAY_SIZE(kzm_pins), "kzm"); 229 ARRAY_SIZE(kzm_pins), "kzm");
228 kzm_init_ext_uart(); 230 kzm_init_ext_uart();
diff --git a/arch/arm/mach-imx/mach-mx1ads.c b/arch/arm/mach-imx/mach-mx1ads.c
index 38ec5cbbda9b..5cd8bee46960 100644
--- a/arch/arm/mach-imx/mach-mx1ads.c
+++ b/arch/arm/mach-imx/mach-mx1ads.c
@@ -115,6 +115,8 @@ static struct i2c_board_info mx1ads_i2c_devices[] = {
115 */ 115 */
116static void __init mx1ads_init(void) 116static void __init mx1ads_init(void)
117{ 117{
118 imx1_soc_init();
119
118 mxc_gpio_setup_multiple_pins(mx1ads_pins, 120 mxc_gpio_setup_multiple_pins(mx1ads_pins,
119 ARRAY_SIZE(mx1ads_pins), "mx1ads"); 121 ARRAY_SIZE(mx1ads_pins), "mx1ads");
120 122
diff --git a/arch/arm/mach-imx/mach-mx21ads.c b/arch/arm/mach-imx/mach-mx21ads.c
index 74ac88978ddd..d389ecf9b5a8 100644
--- a/arch/arm/mach-imx/mach-mx21ads.c
+++ b/arch/arm/mach-imx/mach-mx21ads.c
@@ -279,6 +279,8 @@ static struct platform_device *platform_devices[] __initdata = {
279 279
280static void __init mx21ads_board_init(void) 280static void __init mx21ads_board_init(void)
281{ 281{
282 imx21_soc_init();
283
282 mxc_gpio_setup_multiple_pins(mx21ads_pins, ARRAY_SIZE(mx21ads_pins), 284 mxc_gpio_setup_multiple_pins(mx21ads_pins, ARRAY_SIZE(mx21ads_pins),
283 "mx21ads"); 285 "mx21ads");
284 286
diff --git a/arch/arm/mach-imx/mach-mx25_3ds.c b/arch/arm/mach-imx/mach-mx25_3ds.c
index 58ea3fdf0911..01534bb61305 100644
--- a/arch/arm/mach-imx/mach-mx25_3ds.c
+++ b/arch/arm/mach-imx/mach-mx25_3ds.c
@@ -219,6 +219,8 @@ static const struct esdhc_platform_data mx25pdk_esdhc_pdata __initconst = {
219 219
220static void __init mx25pdk_init(void) 220static void __init mx25pdk_init(void)
221{ 221{
222 imx25_soc_init();
223
222 mxc_iomux_v3_setup_multiple_pads(mx25pdk_pads, 224 mxc_iomux_v3_setup_multiple_pads(mx25pdk_pads,
223 ARRAY_SIZE(mx25pdk_pads)); 225 ARRAY_SIZE(mx25pdk_pads));
224 226
diff --git a/arch/arm/mach-imx/mach-mx27_3ds.c b/arch/arm/mach-imx/mach-mx27_3ds.c
index 6e1accf93f81..117ce0a50f4e 100644
--- a/arch/arm/mach-imx/mach-mx27_3ds.c
+++ b/arch/arm/mach-imx/mach-mx27_3ds.c
@@ -267,6 +267,8 @@ static const struct imxi2c_platform_data mx27_3ds_i2c0_data __initconst = {
267 267
268static void __init mx27pdk_init(void) 268static void __init mx27pdk_init(void)
269{ 269{
270 imx27_soc_init();
271
270 mxc_gpio_setup_multiple_pins(mx27pdk_pins, ARRAY_SIZE(mx27pdk_pins), 272 mxc_gpio_setup_multiple_pins(mx27pdk_pins, ARRAY_SIZE(mx27pdk_pins),
271 "mx27pdk"); 273 "mx27pdk");
272 mx27_3ds_sdhc1_enable_level_translator(); 274 mx27_3ds_sdhc1_enable_level_translator();
diff --git a/arch/arm/mach-imx/mach-mx27ads.c b/arch/arm/mach-imx/mach-mx27ads.c
index 1db79506f5e4..fc26ed71b9ed 100644
--- a/arch/arm/mach-imx/mach-mx27ads.c
+++ b/arch/arm/mach-imx/mach-mx27ads.c
@@ -288,6 +288,8 @@ static const struct imxuart_platform_data uart_pdata __initconst = {
288 288
289static void __init mx27ads_board_init(void) 289static void __init mx27ads_board_init(void)
290{ 290{
291 imx27_soc_init();
292
291 mxc_gpio_setup_multiple_pins(mx27ads_pins, ARRAY_SIZE(mx27ads_pins), 293 mxc_gpio_setup_multiple_pins(mx27ads_pins, ARRAY_SIZE(mx27ads_pins),
292 "mx27ads"); 294 "mx27ads");
293 295
diff --git a/arch/arm/mach-imx/mach-mx31_3ds.c b/arch/arm/mach-imx/mach-mx31_3ds.c
index 9b982449cb52..441fbb83f39c 100644
--- a/arch/arm/mach-imx/mach-mx31_3ds.c
+++ b/arch/arm/mach-imx/mach-mx31_3ds.c
@@ -13,6 +13,7 @@
13 */ 13 */
14 14
15#include <linux/delay.h> 15#include <linux/delay.h>
16#include <linux/dma-mapping.h>
16#include <linux/types.h> 17#include <linux/types.h>
17#include <linux/init.h> 18#include <linux/init.h>
18#include <linux/clk.h> 19#include <linux/clk.h>
@@ -689,6 +690,8 @@ static void __init mx31_3ds_init(void)
689{ 690{
690 int ret; 691 int ret;
691 692
693 imx31_soc_init();
694
692 mxc_iomux_setup_multiple_pins(mx31_3ds_pins, ARRAY_SIZE(mx31_3ds_pins), 695 mxc_iomux_setup_multiple_pins(mx31_3ds_pins, ARRAY_SIZE(mx31_3ds_pins),
693 "mx31_3ds"); 696 "mx31_3ds");
694 697
diff --git a/arch/arm/mach-imx/mach-mx31ads.c b/arch/arm/mach-imx/mach-mx31ads.c
index f4dee0254634..0ce49478a479 100644
--- a/arch/arm/mach-imx/mach-mx31ads.c
+++ b/arch/arm/mach-imx/mach-mx31ads.c
@@ -516,6 +516,8 @@ static void __init mx31ads_init_irq(void)
516 516
517static void __init mx31ads_init(void) 517static void __init mx31ads_init(void)
518{ 518{
519 imx31_soc_init();
520
519 mxc_init_extuart(); 521 mxc_init_extuart();
520 mxc_init_imx_uart(); 522 mxc_init_imx_uart();
521 mxc_init_i2c(); 523 mxc_init_i2c();
diff --git a/arch/arm/mach-imx/mach-mx31lilly.c b/arch/arm/mach-imx/mach-mx31lilly.c
index 410e676ae087..750368ddf0f9 100644
--- a/arch/arm/mach-imx/mach-mx31lilly.c
+++ b/arch/arm/mach-imx/mach-mx31lilly.c
@@ -243,6 +243,8 @@ core_param(mx31lilly_baseboard, mx31lilly_baseboard, int, 0444);
243 243
244static void __init mx31lilly_board_init(void) 244static void __init mx31lilly_board_init(void)
245{ 245{
246 imx31_soc_init();
247
246 switch (mx31lilly_baseboard) { 248 switch (mx31lilly_baseboard) {
247 case MX31LILLY_NOBOARD: 249 case MX31LILLY_NOBOARD:
248 break; 250 break;
diff --git a/arch/arm/mach-imx/mach-mx31lite.c b/arch/arm/mach-imx/mach-mx31lite.c
index ac9b4cad320e..4b47fd9fdd89 100644
--- a/arch/arm/mach-imx/mach-mx31lite.c
+++ b/arch/arm/mach-imx/mach-mx31lite.c
@@ -230,6 +230,8 @@ static void __init mx31lite_init(void)
230{ 230{
231 int ret; 231 int ret;
232 232
233 imx31_soc_init();
234
233 switch (mx31lite_baseboard) { 235 switch (mx31lite_baseboard) {
234 case MX31LITE_NOBOARD: 236 case MX31LITE_NOBOARD:
235 break; 237 break;
diff --git a/arch/arm/mach-imx/mach-mx31moboard.c b/arch/arm/mach-imx/mach-mx31moboard.c
index eaa51e49ca95..a52fd36e2b52 100644
--- a/arch/arm/mach-imx/mach-mx31moboard.c
+++ b/arch/arm/mach-imx/mach-mx31moboard.c
@@ -507,6 +507,8 @@ core_param(mx31moboard_baseboard, mx31moboard_baseboard, int, 0444);
507 */ 507 */
508static void __init mx31moboard_init(void) 508static void __init mx31moboard_init(void)
509{ 509{
510 imx31_soc_init();
511
510 mxc_iomux_setup_multiple_pins(moboard_pins, ARRAY_SIZE(moboard_pins), 512 mxc_iomux_setup_multiple_pins(moboard_pins, ARRAY_SIZE(moboard_pins),
511 "moboard"); 513 "moboard");
512 514
diff --git a/arch/arm/mach-imx/mach-mx35_3ds.c b/arch/arm/mach-imx/mach-mx35_3ds.c
index 882880ac1bbc..48b3c6fd5cf0 100644
--- a/arch/arm/mach-imx/mach-mx35_3ds.c
+++ b/arch/arm/mach-imx/mach-mx35_3ds.c
@@ -179,6 +179,8 @@ static const struct imxi2c_platform_data mx35_3ds_i2c0_data __initconst = {
179 */ 179 */
180static void __init mx35_3ds_init(void) 180static void __init mx35_3ds_init(void)
181{ 181{
182 imx35_soc_init();
183
182 mxc_iomux_v3_setup_multiple_pads(mx35pdk_pads, ARRAY_SIZE(mx35pdk_pads)); 184 mxc_iomux_v3_setup_multiple_pads(mx35pdk_pads, ARRAY_SIZE(mx35pdk_pads));
183 185
184 imx35_add_fec(NULL); 186 imx35_add_fec(NULL);
diff --git a/arch/arm/mach-imx/mach-mxt_td60.c b/arch/arm/mach-imx/mach-mxt_td60.c
index 2774541511e7..c85876fed663 100644
--- a/arch/arm/mach-imx/mach-mxt_td60.c
+++ b/arch/arm/mach-imx/mach-mxt_td60.c
@@ -233,6 +233,8 @@ static const struct imxuart_platform_data uart_pdata __initconst = {
233 233
234static void __init mxt_td60_board_init(void) 234static void __init mxt_td60_board_init(void)
235{ 235{
236 imx27_soc_init();
237
236 mxc_gpio_setup_multiple_pins(mxt_td60_pins, ARRAY_SIZE(mxt_td60_pins), 238 mxc_gpio_setup_multiple_pins(mxt_td60_pins, ARRAY_SIZE(mxt_td60_pins),
237 "MXT_TD60"); 239 "MXT_TD60");
238 240
diff --git a/arch/arm/mach-imx/mach-pca100.c b/arch/arm/mach-imx/mach-pca100.c
index bbddc5a11c43..71083aa16038 100644
--- a/arch/arm/mach-imx/mach-pca100.c
+++ b/arch/arm/mach-imx/mach-pca100.c
@@ -357,6 +357,8 @@ static void __init pca100_init(void)
357{ 357{
358 int ret; 358 int ret;
359 359
360 imx27_soc_init();
361
360 /* SSI unit */ 362 /* SSI unit */
361 mxc_audmux_v1_configure_port(MX27_AUDMUX_HPCR1_SSI0, 363 mxc_audmux_v1_configure_port(MX27_AUDMUX_HPCR1_SSI0,
362 MXC_AUDMUX_V1_PCR_SYN | /* 4wire mode */ 364 MXC_AUDMUX_V1_PCR_SYN | /* 4wire mode */
diff --git a/arch/arm/mach-imx/mach-pcm037.c b/arch/arm/mach-imx/mach-pcm037.c
index 89c213b81295..f45b7cd72c8a 100644
--- a/arch/arm/mach-imx/mach-pcm037.c
+++ b/arch/arm/mach-imx/mach-pcm037.c
@@ -576,6 +576,8 @@ static void __init pcm037_init(void)
576{ 576{
577 int ret; 577 int ret;
578 578
579 imx31_soc_init();
580
579 mxc_iomux_set_gpr(MUX_PGP_UH2, 1); 581 mxc_iomux_set_gpr(MUX_PGP_UH2, 1);
580 582
581 mxc_iomux_setup_multiple_pins(pcm037_pins, ARRAY_SIZE(pcm037_pins), 583 mxc_iomux_setup_multiple_pins(pcm037_pins, ARRAY_SIZE(pcm037_pins),
diff --git a/arch/arm/mach-imx/mach-pcm038.c b/arch/arm/mach-imx/mach-pcm038.c
index 853bb871c7ed..2d6a64bbac44 100644
--- a/arch/arm/mach-imx/mach-pcm038.c
+++ b/arch/arm/mach-imx/mach-pcm038.c
@@ -295,6 +295,8 @@ static const struct mxc_usbh_platform_data usbh2_pdata __initconst = {
295 295
296static void __init pcm038_init(void) 296static void __init pcm038_init(void)
297{ 297{
298 imx27_soc_init();
299
298 mxc_gpio_setup_multiple_pins(pcm038_pins, ARRAY_SIZE(pcm038_pins), 300 mxc_gpio_setup_multiple_pins(pcm038_pins, ARRAY_SIZE(pcm038_pins),
299 "PCM038"); 301 "PCM038");
300 302
diff --git a/arch/arm/mach-imx/mach-pcm043.c b/arch/arm/mach-imx/mach-pcm043.c
index 026441628dfa..163cc318cafb 100644
--- a/arch/arm/mach-imx/mach-pcm043.c
+++ b/arch/arm/mach-imx/mach-pcm043.c
@@ -356,6 +356,8 @@ static struct esdhc_platform_data sd1_pdata = {
356 */ 356 */
357static void __init pcm043_init(void) 357static void __init pcm043_init(void)
358{ 358{
359 imx35_soc_init();
360
359 mxc_iomux_v3_setup_multiple_pads(pcm043_pads, ARRAY_SIZE(pcm043_pads)); 361 mxc_iomux_v3_setup_multiple_pads(pcm043_pads, ARRAY_SIZE(pcm043_pads));
360 362
361 mxc_audmux_v2_configure_port(3, 363 mxc_audmux_v2_configure_port(3,
diff --git a/arch/arm/mach-imx/mach-qong.c b/arch/arm/mach-imx/mach-qong.c
index c16328715939..3626f486498a 100644
--- a/arch/arm/mach-imx/mach-qong.c
+++ b/arch/arm/mach-imx/mach-qong.c
@@ -244,6 +244,8 @@ static void __init qong_init_fpga(void)
244 */ 244 */
245static void __init qong_init(void) 245static void __init qong_init(void)
246{ 246{
247 imx31_soc_init();
248
247 mxc_init_imx_uart(); 249 mxc_init_imx_uart();
248 qong_init_nor_mtd(); 250 qong_init_nor_mtd();
249 qong_init_fpga(); 251 qong_init_fpga();
diff --git a/arch/arm/mach-imx/mach-scb9328.c b/arch/arm/mach-imx/mach-scb9328.c
index dcaee043628e..82805260e19c 100644
--- a/arch/arm/mach-imx/mach-scb9328.c
+++ b/arch/arm/mach-imx/mach-scb9328.c
@@ -129,6 +129,8 @@ static struct platform_device *devices[] __initdata = {
129 */ 129 */
130static void __init scb9328_init(void) 130static void __init scb9328_init(void)
131{ 131{
132 imx1_soc_init();
133
132 imx1_add_imx_uart0(&uart_pdata); 134 imx1_add_imx_uart0(&uart_pdata);
133 135
134 printk(KERN_INFO"Scb9328: Adding devices\n"); 136 printk(KERN_INFO"Scb9328: Adding devices\n");
diff --git a/arch/arm/mach-imx/mach-vpr200.c b/arch/arm/mach-imx/mach-vpr200.c
index d74e3473d236..7d8e012a6335 100644
--- a/arch/arm/mach-imx/mach-vpr200.c
+++ b/arch/arm/mach-imx/mach-vpr200.c
@@ -267,6 +267,8 @@ static struct platform_device *devices[] __initdata = {
267 */ 267 */
268static void __init vpr200_board_init(void) 268static void __init vpr200_board_init(void)
269{ 269{
270 imx35_soc_init();
271
270 mxc_iomux_v3_setup_multiple_pads(vpr200_pads, ARRAY_SIZE(vpr200_pads)); 272 mxc_iomux_v3_setup_multiple_pads(vpr200_pads, ARRAY_SIZE(vpr200_pads));
271 273
272 imx35_add_fec(NULL); 274 imx35_add_fec(NULL);
diff --git a/arch/arm/mach-imx/mm-imx1.c b/arch/arm/mach-imx/mm-imx1.c
index 2e482ba5a0e7..2bded591d5c2 100644
--- a/arch/arm/mach-imx/mm-imx1.c
+++ b/arch/arm/mach-imx/mm-imx1.c
@@ -23,7 +23,6 @@
23 23
24#include <mach/common.h> 24#include <mach/common.h>
25#include <mach/hardware.h> 25#include <mach/hardware.h>
26#include <mach/gpio.h>
27#include <mach/irqs.h> 26#include <mach/irqs.h>
28#include <mach/iomux-v1.h> 27#include <mach/iomux-v1.h>
29 28
@@ -44,15 +43,19 @@ void __init imx1_init_early(void)
44 MX1_NUM_GPIO_PORT); 43 MX1_NUM_GPIO_PORT);
45} 44}
46 45
47static struct mxc_gpio_port imx1_gpio_ports[] = {
48 DEFINE_IMX_GPIO_PORT_IRQ(MX1, 0, 1, MX1_GPIO_INT_PORTA),
49 DEFINE_IMX_GPIO_PORT_IRQ(MX1, 1, 2, MX1_GPIO_INT_PORTB),
50 DEFINE_IMX_GPIO_PORT_IRQ(MX1, 2, 3, MX1_GPIO_INT_PORTC),
51 DEFINE_IMX_GPIO_PORT_IRQ(MX1, 3, 4, MX1_GPIO_INT_PORTD),
52};
53
54void __init mx1_init_irq(void) 46void __init mx1_init_irq(void)
55{ 47{
56 mxc_init_irq(MX1_IO_ADDRESS(MX1_AVIC_BASE_ADDR)); 48 mxc_init_irq(MX1_IO_ADDRESS(MX1_AVIC_BASE_ADDR));
57 mxc_gpio_init(imx1_gpio_ports, ARRAY_SIZE(imx1_gpio_ports)); 49}
50
51void __init imx1_soc_init(void)
52{
53 mxc_register_gpio("imx1-gpio", 0, MX1_GPIO1_BASE_ADDR, SZ_256,
54 MX1_GPIO_INT_PORTA, 0);
55 mxc_register_gpio("imx1-gpio", 1, MX1_GPIO2_BASE_ADDR, SZ_256,
56 MX1_GPIO_INT_PORTB, 0);
57 mxc_register_gpio("imx1-gpio", 2, MX1_GPIO3_BASE_ADDR, SZ_256,
58 MX1_GPIO_INT_PORTC, 0);
59 mxc_register_gpio("imx1-gpio", 3, MX1_GPIO4_BASE_ADDR, SZ_256,
60 MX1_GPIO_INT_PORTD, 0);
58} 61}
diff --git a/arch/arm/mach-imx/mm-imx21.c b/arch/arm/mach-imx/mm-imx21.c
index 7a0c500ac2c8..6d7d518686a5 100644
--- a/arch/arm/mach-imx/mm-imx21.c
+++ b/arch/arm/mach-imx/mm-imx21.c
@@ -24,7 +24,6 @@
24#include <mach/common.h> 24#include <mach/common.h>
25#include <asm/pgtable.h> 25#include <asm/pgtable.h>
26#include <asm/mach/map.h> 26#include <asm/mach/map.h>
27#include <mach/gpio.h>
28#include <mach/irqs.h> 27#include <mach/irqs.h>
29#include <mach/iomux-v1.h> 28#include <mach/iomux-v1.h>
30 29
@@ -70,17 +69,17 @@ void __init imx21_init_early(void)
70 MX21_NUM_GPIO_PORT); 69 MX21_NUM_GPIO_PORT);
71} 70}
72 71
73static struct mxc_gpio_port imx21_gpio_ports[] = {
74 DEFINE_IMX_GPIO_PORT_IRQ(MX21, 0, 1, MX21_INT_GPIO),
75 DEFINE_IMX_GPIO_PORT(MX21, 1, 2),
76 DEFINE_IMX_GPIO_PORT(MX21, 2, 3),
77 DEFINE_IMX_GPIO_PORT(MX21, 3, 4),
78 DEFINE_IMX_GPIO_PORT(MX21, 4, 5),
79 DEFINE_IMX_GPIO_PORT(MX21, 5, 6),
80};
81
82void __init mx21_init_irq(void) 72void __init mx21_init_irq(void)
83{ 73{
84 mxc_init_irq(MX21_IO_ADDRESS(MX21_AVIC_BASE_ADDR)); 74 mxc_init_irq(MX21_IO_ADDRESS(MX21_AVIC_BASE_ADDR));
85 mxc_gpio_init(imx21_gpio_ports, ARRAY_SIZE(imx21_gpio_ports)); 75}
76
77void __init imx21_soc_init(void)
78{
79 mxc_register_gpio("imx21-gpio", 0, MX21_GPIO1_BASE_ADDR, SZ_256, MX21_INT_GPIO, 0);
80 mxc_register_gpio("imx21-gpio", 1, MX21_GPIO2_BASE_ADDR, SZ_256, MX21_INT_GPIO, 0);
81 mxc_register_gpio("imx21-gpio", 2, MX21_GPIO3_BASE_ADDR, SZ_256, MX21_INT_GPIO, 0);
82 mxc_register_gpio("imx21-gpio", 3, MX21_GPIO4_BASE_ADDR, SZ_256, MX21_INT_GPIO, 0);
83 mxc_register_gpio("imx21-gpio", 4, MX21_GPIO5_BASE_ADDR, SZ_256, MX21_INT_GPIO, 0);
84 mxc_register_gpio("imx21-gpio", 5, MX21_GPIO6_BASE_ADDR, SZ_256, MX21_INT_GPIO, 0);
86} 85}
diff --git a/arch/arm/mach-imx/mm-imx25.c b/arch/arm/mach-imx/mm-imx25.c
index 02f7b5c7fa8e..9a1591c2508d 100644
--- a/arch/arm/mach-imx/mm-imx25.c
+++ b/arch/arm/mach-imx/mm-imx25.c
@@ -27,7 +27,6 @@
27#include <mach/hardware.h> 27#include <mach/hardware.h>
28#include <mach/mx25.h> 28#include <mach/mx25.h>
29#include <mach/iomux-v3.h> 29#include <mach/iomux-v3.h>
30#include <mach/gpio.h>
31#include <mach/irqs.h> 30#include <mach/irqs.h>
32 31
33/* 32/*
@@ -57,16 +56,16 @@ void __init imx25_init_early(void)
57 mxc_arch_reset_init(MX25_IO_ADDRESS(MX25_WDOG_BASE_ADDR)); 56 mxc_arch_reset_init(MX25_IO_ADDRESS(MX25_WDOG_BASE_ADDR));
58} 57}
59 58
60static struct mxc_gpio_port imx25_gpio_ports[] = {
61 DEFINE_IMX_GPIO_PORT_IRQ(MX25, 0, 1, MX25_INT_GPIO1),
62 DEFINE_IMX_GPIO_PORT_IRQ(MX25, 1, 2, MX25_INT_GPIO2),
63 DEFINE_IMX_GPIO_PORT_IRQ(MX25, 2, 3, MX25_INT_GPIO3),
64 DEFINE_IMX_GPIO_PORT_IRQ(MX25, 3, 4, MX25_INT_GPIO4),
65};
66
67void __init mx25_init_irq(void) 59void __init mx25_init_irq(void)
68{ 60{
69 mxc_init_irq(MX25_IO_ADDRESS(MX25_AVIC_BASE_ADDR)); 61 mxc_init_irq(MX25_IO_ADDRESS(MX25_AVIC_BASE_ADDR));
70 mxc_gpio_init(imx25_gpio_ports, ARRAY_SIZE(imx25_gpio_ports));
71} 62}
72 63
64void __init imx25_soc_init(void)
65{
66 /* i.mx25 has the i.mx31 type gpio */
67 mxc_register_gpio("imx31-gpio", 0, MX25_GPIO1_BASE_ADDR, SZ_16K, MX25_INT_GPIO1, 0);
68 mxc_register_gpio("imx31-gpio", 1, MX25_GPIO2_BASE_ADDR, SZ_16K, MX25_INT_GPIO2, 0);
69 mxc_register_gpio("imx31-gpio", 2, MX25_GPIO3_BASE_ADDR, SZ_16K, MX25_INT_GPIO3, 0);
70 mxc_register_gpio("imx31-gpio", 3, MX25_GPIO4_BASE_ADDR, SZ_16K, MX25_INT_GPIO4, 0);
71}
diff --git a/arch/arm/mach-imx/mm-imx27.c b/arch/arm/mach-imx/mm-imx27.c
index a6761a39f08c..133b30003ddb 100644
--- a/arch/arm/mach-imx/mm-imx27.c
+++ b/arch/arm/mach-imx/mm-imx27.c
@@ -24,7 +24,6 @@
24#include <mach/common.h> 24#include <mach/common.h>
25#include <asm/pgtable.h> 25#include <asm/pgtable.h>
26#include <asm/mach/map.h> 26#include <asm/mach/map.h>
27#include <mach/gpio.h>
28#include <mach/irqs.h> 27#include <mach/irqs.h>
29#include <mach/iomux-v1.h> 28#include <mach/iomux-v1.h>
30 29
@@ -70,17 +69,18 @@ void __init imx27_init_early(void)
70 MX27_NUM_GPIO_PORT); 69 MX27_NUM_GPIO_PORT);
71} 70}
72 71
73static struct mxc_gpio_port imx27_gpio_ports[] = {
74 DEFINE_IMX_GPIO_PORT_IRQ(MX27, 0, 1, MX27_INT_GPIO),
75 DEFINE_IMX_GPIO_PORT(MX27, 1, 2),
76 DEFINE_IMX_GPIO_PORT(MX27, 2, 3),
77 DEFINE_IMX_GPIO_PORT(MX27, 3, 4),
78 DEFINE_IMX_GPIO_PORT(MX27, 4, 5),
79 DEFINE_IMX_GPIO_PORT(MX27, 5, 6),
80};
81
82void __init mx27_init_irq(void) 72void __init mx27_init_irq(void)
83{ 73{
84 mxc_init_irq(MX27_IO_ADDRESS(MX27_AVIC_BASE_ADDR)); 74 mxc_init_irq(MX27_IO_ADDRESS(MX27_AVIC_BASE_ADDR));
85 mxc_gpio_init(imx27_gpio_ports, ARRAY_SIZE(imx27_gpio_ports)); 75}
76
77void __init imx27_soc_init(void)
78{
79 /* i.mx27 has the i.mx21 type gpio */
80 mxc_register_gpio("imx21-gpio", 0, MX27_GPIO1_BASE_ADDR, SZ_256, MX27_INT_GPIO, 0);
81 mxc_register_gpio("imx21-gpio", 1, MX27_GPIO2_BASE_ADDR, SZ_256, MX27_INT_GPIO, 0);
82 mxc_register_gpio("imx21-gpio", 2, MX27_GPIO3_BASE_ADDR, SZ_256, MX27_INT_GPIO, 0);
83 mxc_register_gpio("imx21-gpio", 3, MX27_GPIO4_BASE_ADDR, SZ_256, MX27_INT_GPIO, 0);
84 mxc_register_gpio("imx21-gpio", 4, MX27_GPIO5_BASE_ADDR, SZ_256, MX27_INT_GPIO, 0);
85 mxc_register_gpio("imx21-gpio", 5, MX27_GPIO6_BASE_ADDR, SZ_256, MX27_INT_GPIO, 0);
86} 86}
diff --git a/arch/arm/mach-imx/mm-imx31.c b/arch/arm/mach-imx/mm-imx31.c
index 86b9b45864d2..6d103c01b8b9 100644
--- a/arch/arm/mach-imx/mm-imx31.c
+++ b/arch/arm/mach-imx/mm-imx31.c
@@ -26,7 +26,6 @@
26#include <mach/common.h> 26#include <mach/common.h>
27#include <mach/hardware.h> 27#include <mach/hardware.h>
28#include <mach/iomux-v3.h> 28#include <mach/iomux-v3.h>
29#include <mach/gpio.h>
30#include <mach/irqs.h> 29#include <mach/irqs.h>
31 30
32static struct map_desc mx31_io_desc[] __initdata = { 31static struct map_desc mx31_io_desc[] __initdata = {
@@ -53,14 +52,14 @@ void __init imx31_init_early(void)
53 mxc_arch_reset_init(MX31_IO_ADDRESS(MX31_WDOG_BASE_ADDR)); 52 mxc_arch_reset_init(MX31_IO_ADDRESS(MX31_WDOG_BASE_ADDR));
54} 53}
55 54
56static struct mxc_gpio_port imx31_gpio_ports[] = {
57 DEFINE_IMX_GPIO_PORT_IRQ(MX31, 0, 1, MX31_INT_GPIO1),
58 DEFINE_IMX_GPIO_PORT_IRQ(MX31, 1, 2, MX31_INT_GPIO2),
59 DEFINE_IMX_GPIO_PORT_IRQ(MX31, 2, 3, MX31_INT_GPIO3),
60};
61
62void __init mx31_init_irq(void) 55void __init mx31_init_irq(void)
63{ 56{
64 mxc_init_irq(MX31_IO_ADDRESS(MX31_AVIC_BASE_ADDR)); 57 mxc_init_irq(MX31_IO_ADDRESS(MX31_AVIC_BASE_ADDR));
65 mxc_gpio_init(imx31_gpio_ports, ARRAY_SIZE(imx31_gpio_ports)); 58}
59
60void __init imx31_soc_init(void)
61{
62 mxc_register_gpio("imx31-gpio", 0, MX31_GPIO1_BASE_ADDR, SZ_16K, MX31_INT_GPIO1, 0);
63 mxc_register_gpio("imx31-gpio", 1, MX31_GPIO2_BASE_ADDR, SZ_16K, MX31_INT_GPIO2, 0);
64 mxc_register_gpio("imx31-gpio", 2, MX31_GPIO3_BASE_ADDR, SZ_16K, MX31_INT_GPIO3, 0);
66} 65}
diff --git a/arch/arm/mach-imx/mm-imx35.c b/arch/arm/mach-imx/mm-imx35.c
index c880e6d1ae55..bb068bc8dab7 100644
--- a/arch/arm/mach-imx/mm-imx35.c
+++ b/arch/arm/mach-imx/mm-imx35.c
@@ -27,7 +27,6 @@
27#include <mach/common.h> 27#include <mach/common.h>
28#include <mach/hardware.h> 28#include <mach/hardware.h>
29#include <mach/iomux-v3.h> 29#include <mach/iomux-v3.h>
30#include <mach/gpio.h>
31#include <mach/irqs.h> 30#include <mach/irqs.h>
32 31
33static struct map_desc mx35_io_desc[] __initdata = { 32static struct map_desc mx35_io_desc[] __initdata = {
@@ -50,14 +49,15 @@ void __init imx35_init_early(void)
50 mxc_arch_reset_init(MX35_IO_ADDRESS(MX35_WDOG_BASE_ADDR)); 49 mxc_arch_reset_init(MX35_IO_ADDRESS(MX35_WDOG_BASE_ADDR));
51} 50}
52 51
53static struct mxc_gpio_port imx35_gpio_ports[] = {
54 DEFINE_IMX_GPIO_PORT_IRQ(MX35, 0, 1, MX35_INT_GPIO1),
55 DEFINE_IMX_GPIO_PORT_IRQ(MX35, 1, 2, MX35_INT_GPIO2),
56 DEFINE_IMX_GPIO_PORT_IRQ(MX35, 2, 3, MX35_INT_GPIO3),
57};
58
59void __init mx35_init_irq(void) 52void __init mx35_init_irq(void)
60{ 53{
61 mxc_init_irq(MX35_IO_ADDRESS(MX35_AVIC_BASE_ADDR)); 54 mxc_init_irq(MX35_IO_ADDRESS(MX35_AVIC_BASE_ADDR));
62 mxc_gpio_init(imx35_gpio_ports, ARRAY_SIZE(imx35_gpio_ports)); 55}
56
57void __init imx35_soc_init(void)
58{
59 /* i.mx35 has the i.mx31 type gpio */
60 mxc_register_gpio("imx31-gpio", 0, MX35_GPIO1_BASE_ADDR, SZ_16K, MX35_INT_GPIO1, 0);
61 mxc_register_gpio("imx31-gpio", 1, MX35_GPIO2_BASE_ADDR, SZ_16K, MX35_INT_GPIO2, 0);
62 mxc_register_gpio("imx31-gpio", 2, MX35_GPIO3_BASE_ADDR, SZ_16K, MX35_INT_GPIO3, 0);
63} 63}
diff --git a/arch/arm/mach-iop13xx/setup.c b/arch/arm/mach-iop13xx/setup.c
index 5c147fb66a01..a5b989728b9e 100644
--- a/arch/arm/mach-iop13xx/setup.c
+++ b/arch/arm/mach-iop13xx/setup.c
@@ -17,6 +17,7 @@
17 * 17 *
18 */ 18 */
19 19
20#include <linux/dma-mapping.h>
20#include <linux/serial_8250.h> 21#include <linux/serial_8250.h>
21#include <linux/io.h> 22#include <linux/io.h>
22#ifdef CONFIG_MTD_PHYSMAP 23#ifdef CONFIG_MTD_PHYSMAP
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c
index 74ed81a3cb1a..07772575d7ab 100644
--- a/arch/arm/mach-ixp4xx/common.c
+++ b/arch/arm/mach-ixp4xx/common.c
@@ -419,14 +419,20 @@ static void notrace ixp4xx_update_sched_clock(void)
419/* 419/*
420 * clocksource 420 * clocksource
421 */ 421 */
422
423static cycle_t ixp4xx_clocksource_read(struct clocksource *c)
424{
425 return *IXP4XX_OSTS;
426}
427
422unsigned long ixp4xx_timer_freq = IXP4XX_TIMER_FREQ; 428unsigned long ixp4xx_timer_freq = IXP4XX_TIMER_FREQ;
423EXPORT_SYMBOL(ixp4xx_timer_freq); 429EXPORT_SYMBOL(ixp4xx_timer_freq);
424static void __init ixp4xx_clocksource_init(void) 430static void __init ixp4xx_clocksource_init(void)
425{ 431{
426 init_sched_clock(&cd, ixp4xx_update_sched_clock, 32, ixp4xx_timer_freq); 432 init_sched_clock(&cd, ixp4xx_update_sched_clock, 32, ixp4xx_timer_freq);
427 433
428 clocksource_mmio_init(&IXP4XX_OSTS, "OSTS", ixp4xx_timer_freq, 200, 32, 434 clocksource_mmio_init(NULL, "OSTS", ixp4xx_timer_freq, 200, 32,
429 clocksource_mmio_readl_up); 435 ixp4xx_clocksource_read);
430} 436}
431 437
432/* 438/*
diff --git a/arch/arm/mach-mmp/brownstone.c b/arch/arm/mach-mmp/brownstone.c
index 7bb78fd5a2a6..c79162a50f28 100644
--- a/arch/arm/mach-mmp/brownstone.c
+++ b/arch/arm/mach-mmp/brownstone.c
@@ -177,9 +177,16 @@ static struct i2c_board_info brownstone_twsi1_info[] = {
177}; 177};
178 178
179static struct sdhci_pxa_platdata mmp2_sdh_platdata_mmc0 = { 179static struct sdhci_pxa_platdata mmp2_sdh_platdata_mmc0 = {
180 .max_speed = 25000000, 180 .clk_delay_cycles = 0x1f,
181}; 181};
182 182
183static struct sdhci_pxa_platdata mmp2_sdh_platdata_mmc2 = {
184 .clk_delay_cycles = 0x1f,
185 .flags = PXA_FLAG_CARD_PERMANENT
186 | PXA_FLAG_SD_8_BIT_CAPABLE_SLOT,
187};
188
189
183static void __init brownstone_init(void) 190static void __init brownstone_init(void)
184{ 191{
185 mfp_config(ARRAY_AND_SIZE(brownstone_pin_config)); 192 mfp_config(ARRAY_AND_SIZE(brownstone_pin_config));
@@ -189,6 +196,7 @@ static void __init brownstone_init(void)
189 mmp2_add_uart(3); 196 mmp2_add_uart(3);
190 mmp2_add_twsi(1, NULL, ARRAY_AND_SIZE(brownstone_twsi1_info)); 197 mmp2_add_twsi(1, NULL, ARRAY_AND_SIZE(brownstone_twsi1_info));
191 mmp2_add_sdhost(0, &mmp2_sdh_platdata_mmc0); /* SD/MMC */ 198 mmp2_add_sdhost(0, &mmp2_sdh_platdata_mmc0); /* SD/MMC */
199 mmp2_add_sdhost(2, &mmp2_sdh_platdata_mmc2); /* eMMC */
192 200
193 /* enable 5v regulator */ 201 /* enable 5v regulator */
194 platform_device_register(&brownstone_v_5vp_device); 202 platform_device_register(&brownstone_v_5vp_device);
diff --git a/arch/arm/mach-mmp/include/mach/mmp2.h b/arch/arm/mach-mmp/include/mach/mmp2.h
index 2cbf6df09b82..de7b88826ad7 100644
--- a/arch/arm/mach-mmp/include/mach/mmp2.h
+++ b/arch/arm/mach-mmp/include/mach/mmp2.h
@@ -1,7 +1,7 @@
1#ifndef __ASM_MACH_MMP2_H 1#ifndef __ASM_MACH_MMP2_H
2#define __ASM_MACH_MMP2_H 2#define __ASM_MACH_MMP2_H
3 3
4#include <plat/sdhci.h> 4#include <linux/platform_data/pxa_sdhci.h>
5 5
6struct sys_timer; 6struct sys_timer;
7 7
diff --git a/arch/arm/mach-mmp/jasper.c b/arch/arm/mach-mmp/jasper.c
index 24172a0aad59..5d6421d63254 100644
--- a/arch/arm/mach-mmp/jasper.c
+++ b/arch/arm/mach-mmp/jasper.c
@@ -154,7 +154,7 @@ static struct i2c_board_info jasper_twsi1_info[] = {
154}; 154};
155 155
156static struct sdhci_pxa_platdata mmp2_sdh_platdata_mmc0 = { 156static struct sdhci_pxa_platdata mmp2_sdh_platdata_mmc0 = {
157 .max_speed = 25000000, 157 .clk_delay_cycles = 0x1f,
158}; 158};
159 159
160static void __init jasper_init(void) 160static void __init jasper_init(void)
diff --git a/arch/arm/mach-mmp/mmp2.c b/arch/arm/mach-mmp/mmp2.c
index 8e6c3ac7f7c1..079c18861d5c 100644
--- a/arch/arm/mach-mmp/mmp2.c
+++ b/arch/arm/mach-mmp/mmp2.c
@@ -168,10 +168,10 @@ static struct clk_lookup mmp2_clkregs[] = {
168 INIT_CLKREG(&clk_twsi5, "pxa2xx-i2c.4", NULL), 168 INIT_CLKREG(&clk_twsi5, "pxa2xx-i2c.4", NULL),
169 INIT_CLKREG(&clk_twsi6, "pxa2xx-i2c.5", NULL), 169 INIT_CLKREG(&clk_twsi6, "pxa2xx-i2c.5", NULL),
170 INIT_CLKREG(&clk_nand, "pxa3xx-nand", NULL), 170 INIT_CLKREG(&clk_nand, "pxa3xx-nand", NULL),
171 INIT_CLKREG(&clk_sdh0, "sdhci-pxa.0", "PXA-SDHCLK"), 171 INIT_CLKREG(&clk_sdh0, "sdhci-pxav3.0", "PXA-SDHCLK"),
172 INIT_CLKREG(&clk_sdh1, "sdhci-pxa.1", "PXA-SDHCLK"), 172 INIT_CLKREG(&clk_sdh1, "sdhci-pxav3.1", "PXA-SDHCLK"),
173 INIT_CLKREG(&clk_sdh2, "sdhci-pxa.2", "PXA-SDHCLK"), 173 INIT_CLKREG(&clk_sdh2, "sdhci-pxav3.2", "PXA-SDHCLK"),
174 INIT_CLKREG(&clk_sdh3, "sdhci-pxa.3", "PXA-SDHCLK"), 174 INIT_CLKREG(&clk_sdh3, "sdhci-pxav3.3", "PXA-SDHCLK"),
175}; 175};
176 176
177static int __init mmp2_init(void) 177static int __init mmp2_init(void)
@@ -222,8 +222,8 @@ MMP2_DEVICE(twsi4, "pxa2xx-i2c", 3, TWSI4, 0xd4033000, 0x70);
222MMP2_DEVICE(twsi5, "pxa2xx-i2c", 4, TWSI5, 0xd4033800, 0x70); 222MMP2_DEVICE(twsi5, "pxa2xx-i2c", 4, TWSI5, 0xd4033800, 0x70);
223MMP2_DEVICE(twsi6, "pxa2xx-i2c", 5, TWSI6, 0xd4034000, 0x70); 223MMP2_DEVICE(twsi6, "pxa2xx-i2c", 5, TWSI6, 0xd4034000, 0x70);
224MMP2_DEVICE(nand, "pxa3xx-nand", -1, NAND, 0xd4283000, 0x100, 28, 29); 224MMP2_DEVICE(nand, "pxa3xx-nand", -1, NAND, 0xd4283000, 0x100, 28, 29);
225MMP2_DEVICE(sdh0, "sdhci-pxa", 0, MMC, 0xd4280000, 0x120); 225MMP2_DEVICE(sdh0, "sdhci-pxav3", 0, MMC, 0xd4280000, 0x120);
226MMP2_DEVICE(sdh1, "sdhci-pxa", 1, MMC2, 0xd4280800, 0x120); 226MMP2_DEVICE(sdh1, "sdhci-pxav3", 1, MMC2, 0xd4280800, 0x120);
227MMP2_DEVICE(sdh2, "sdhci-pxa", 2, MMC3, 0xd4281000, 0x120); 227MMP2_DEVICE(sdh2, "sdhci-pxav3", 2, MMC3, 0xd4281000, 0x120);
228MMP2_DEVICE(sdh3, "sdhci-pxa", 3, MMC4, 0xd4281800, 0x120); 228MMP2_DEVICE(sdh3, "sdhci-pxav3", 3, MMC4, 0xd4281800, 0x120);
229 229
diff --git a/arch/arm/mach-mmp/pxa168.c b/arch/arm/mach-mmp/pxa168.c
index 72b4e7631583..ab9f999106c7 100644
--- a/arch/arm/mach-mmp/pxa168.c
+++ b/arch/arm/mach-mmp/pxa168.c
@@ -79,7 +79,7 @@ static APBC_CLK(ssp4, PXA168_SSP4, 4, 0);
79static APBC_CLK(ssp5, PXA168_SSP5, 4, 0); 79static APBC_CLK(ssp5, PXA168_SSP5, 4, 0);
80static APBC_CLK(keypad, PXA168_KPC, 0, 32000); 80static APBC_CLK(keypad, PXA168_KPC, 0, 32000);
81 81
82static APMU_CLK(nand, NAND, 0x01db, 208000000); 82static APMU_CLK(nand, NAND, 0x19b, 156000000);
83static APMU_CLK(lcd, LCD, 0x7f, 312000000); 83static APMU_CLK(lcd, LCD, 0x7f, 312000000);
84 84
85/* device and clock bindings */ 85/* device and clock bindings */
diff --git a/arch/arm/mach-mmp/pxa910.c b/arch/arm/mach-mmp/pxa910.c
index 8f92ccd26edf..1464607aa60d 100644
--- a/arch/arm/mach-mmp/pxa910.c
+++ b/arch/arm/mach-mmp/pxa910.c
@@ -110,7 +110,7 @@ static APBC_CLK(pwm2, PXA910_PWM2, 1, 13000000);
110static APBC_CLK(pwm3, PXA910_PWM3, 1, 13000000); 110static APBC_CLK(pwm3, PXA910_PWM3, 1, 13000000);
111static APBC_CLK(pwm4, PXA910_PWM4, 1, 13000000); 111static APBC_CLK(pwm4, PXA910_PWM4, 1, 13000000);
112 112
113static APMU_CLK(nand, NAND, 0x01db, 208000000); 113static APMU_CLK(nand, NAND, 0x19b, 156000000);
114static APMU_CLK(u2o, USB, 0x1b, 480000000); 114static APMU_CLK(u2o, USB, 0x1b, 480000000);
115 115
116/* device and clock bindings */ 116/* device and clock bindings */
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index 1516896e8d17..888e92502e15 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -148,22 +148,6 @@ config MACH_MSM8960_RUMI3
148 148
149endmenu 149endmenu
150 150
151config MSM_IOMMU
152 bool "MSM IOMMU Support"
153 depends on ARCH_MSM8X60 || ARCH_MSM8960
154 select IOMMU_API
155 default n
156 help
157 Support for the IOMMUs found on certain Qualcomm SOCs.
158 These IOMMUs allow virtualization of the address space used by most
159 cores within the multimedia subsystem.
160
161 If unsure, say N here.
162
163config IOMMU_PGTABLES_L2
164 def_bool y
165 depends on MSM_IOMMU && MMU && SMP && CPU_DCACHE_DISABLE=n
166
167config MSM_DEBUG_UART 151config MSM_DEBUG_UART
168 int 152 int
169 default 1 if MSM_DEBUG_UART1 153 default 1 if MSM_DEBUG_UART1
@@ -205,9 +189,6 @@ config MSM_GPIOMUX
205config MSM_V2_TLMM 189config MSM_V2_TLMM
206 bool 190 bool
207 191
208config IOMMU_API
209 bool
210
211config MSM_SCM 192config MSM_SCM
212 bool 193 bool
213endif 194endif
diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile
index 9519fd28a025..b70658c5ae00 100644
--- a/arch/arm/mach-msm/Makefile
+++ b/arch/arm/mach-msm/Makefile
@@ -3,7 +3,7 @@ obj-y += clock.o
3obj-$(CONFIG_DEBUG_FS) += clock-debug.o 3obj-$(CONFIG_DEBUG_FS) += clock-debug.o
4 4
5obj-$(CONFIG_MSM_VIC) += irq-vic.o 5obj-$(CONFIG_MSM_VIC) += irq-vic.o
6obj-$(CONFIG_MSM_IOMMU) += iommu.o iommu_dev.o devices-iommu.o 6obj-$(CONFIG_MSM_IOMMU) += devices-iommu.o
7 7
8obj-$(CONFIG_ARCH_MSM7X00A) += dma.o irq.o acpuclock-arm11.o 8obj-$(CONFIG_ARCH_MSM7X00A) += dma.o irq.o acpuclock-arm11.o
9obj-$(CONFIG_ARCH_MSM7X30) += dma.o 9obj-$(CONFIG_ARCH_MSM7X30) += dma.o
diff --git a/arch/arm/mach-msm/iommu.c b/arch/arm/mach-msm/iommu.c
deleted file mode 100644
index 1a584e077c61..000000000000
--- a/arch/arm/mach-msm/iommu.c
+++ /dev/null
@@ -1,731 +0,0 @@
1/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
15 * 02110-1301, USA.
16 */
17
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/platform_device.h>
22#include <linux/errno.h>
23#include <linux/io.h>
24#include <linux/interrupt.h>
25#include <linux/list.h>
26#include <linux/spinlock.h>
27#include <linux/slab.h>
28#include <linux/iommu.h>
29#include <linux/clk.h>
30
31#include <asm/cacheflush.h>
32#include <asm/sizes.h>
33
34#include <mach/iommu_hw-8xxx.h>
35#include <mach/iommu.h>
36
37#define MRC(reg, processor, op1, crn, crm, op2) \
38__asm__ __volatile__ ( \
39" mrc " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n" \
40: "=r" (reg))
41
42#define RCP15_PRRR(reg) MRC(reg, p15, 0, c10, c2, 0)
43#define RCP15_NMRR(reg) MRC(reg, p15, 0, c10, c2, 1)
44
45static int msm_iommu_tex_class[4];
46
47DEFINE_SPINLOCK(msm_iommu_lock);
48
49struct msm_priv {
50 unsigned long *pgtable;
51 struct list_head list_attached;
52};
53
54static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
55{
56 int ret;
57
58 ret = clk_enable(drvdata->pclk);
59 if (ret)
60 goto fail;
61
62 if (drvdata->clk) {
63 ret = clk_enable(drvdata->clk);
64 if (ret)
65 clk_disable(drvdata->pclk);
66 }
67fail:
68 return ret;
69}
70
71static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
72{
73 if (drvdata->clk)
74 clk_disable(drvdata->clk);
75 clk_disable(drvdata->pclk);
76}
77
78static int __flush_iotlb(struct iommu_domain *domain)
79{
80 struct msm_priv *priv = domain->priv;
81 struct msm_iommu_drvdata *iommu_drvdata;
82 struct msm_iommu_ctx_drvdata *ctx_drvdata;
83 int ret = 0;
84#ifndef CONFIG_IOMMU_PGTABLES_L2
85 unsigned long *fl_table = priv->pgtable;
86 int i;
87
88 if (!list_empty(&priv->list_attached)) {
89 dmac_flush_range(fl_table, fl_table + SZ_16K);
90
91 for (i = 0; i < NUM_FL_PTE; i++)
92 if ((fl_table[i] & 0x03) == FL_TYPE_TABLE) {
93 void *sl_table = __va(fl_table[i] &
94 FL_BASE_MASK);
95 dmac_flush_range(sl_table, sl_table + SZ_4K);
96 }
97 }
98#endif
99
100 list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
101 if (!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent)
102 BUG();
103
104 iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
105 BUG_ON(!iommu_drvdata);
106
107 ret = __enable_clocks(iommu_drvdata);
108 if (ret)
109 goto fail;
110
111 SET_CTX_TLBIALL(iommu_drvdata->base, ctx_drvdata->num, 0);
112 __disable_clocks(iommu_drvdata);
113 }
114fail:
115 return ret;
116}
117
118static void __reset_context(void __iomem *base, int ctx)
119{
120 SET_BPRCOSH(base, ctx, 0);
121 SET_BPRCISH(base, ctx, 0);
122 SET_BPRCNSH(base, ctx, 0);
123 SET_BPSHCFG(base, ctx, 0);
124 SET_BPMTCFG(base, ctx, 0);
125 SET_ACTLR(base, ctx, 0);
126 SET_SCTLR(base, ctx, 0);
127 SET_FSRRESTORE(base, ctx, 0);
128 SET_TTBR0(base, ctx, 0);
129 SET_TTBR1(base, ctx, 0);
130 SET_TTBCR(base, ctx, 0);
131 SET_BFBCR(base, ctx, 0);
132 SET_PAR(base, ctx, 0);
133 SET_FAR(base, ctx, 0);
134 SET_CTX_TLBIALL(base, ctx, 0);
135 SET_TLBFLPTER(base, ctx, 0);
136 SET_TLBSLPTER(base, ctx, 0);
137 SET_TLBLKCR(base, ctx, 0);
138 SET_PRRR(base, ctx, 0);
139 SET_NMRR(base, ctx, 0);
140}
141
142static void __program_context(void __iomem *base, int ctx, phys_addr_t pgtable)
143{
144 unsigned int prrr, nmrr;
145 __reset_context(base, ctx);
146
147 /* Set up HTW mode */
148 /* TLB miss configuration: perform HTW on miss */
149 SET_TLBMCFG(base, ctx, 0x3);
150
151 /* V2P configuration: HTW for access */
152 SET_V2PCFG(base, ctx, 0x3);
153
154 SET_TTBCR(base, ctx, 0);
155 SET_TTBR0_PA(base, ctx, (pgtable >> 14));
156
157 /* Invalidate the TLB for this context */
158 SET_CTX_TLBIALL(base, ctx, 0);
159
160 /* Set interrupt number to "secure" interrupt */
161 SET_IRPTNDX(base, ctx, 0);
162
163 /* Enable context fault interrupt */
164 SET_CFEIE(base, ctx, 1);
165
166 /* Stall access on a context fault and let the handler deal with it */
167 SET_CFCFG(base, ctx, 1);
168
169 /* Redirect all cacheable requests to L2 slave port. */
170 SET_RCISH(base, ctx, 1);
171 SET_RCOSH(base, ctx, 1);
172 SET_RCNSH(base, ctx, 1);
173
174 /* Turn on TEX Remap */
175 SET_TRE(base, ctx, 1);
176
177 /* Set TEX remap attributes */
178 RCP15_PRRR(prrr);
179 RCP15_NMRR(nmrr);
180 SET_PRRR(base, ctx, prrr);
181 SET_NMRR(base, ctx, nmrr);
182
183 /* Turn on BFB prefetch */
184 SET_BFBDFE(base, ctx, 1);
185
186#ifdef CONFIG_IOMMU_PGTABLES_L2
187 /* Configure page tables as inner-cacheable and shareable to reduce
188 * the TLB miss penalty.
189 */
190 SET_TTBR0_SH(base, ctx, 1);
191 SET_TTBR1_SH(base, ctx, 1);
192
193 SET_TTBR0_NOS(base, ctx, 1);
194 SET_TTBR1_NOS(base, ctx, 1);
195
196 SET_TTBR0_IRGNH(base, ctx, 0); /* WB, WA */
197 SET_TTBR0_IRGNL(base, ctx, 1);
198
199 SET_TTBR1_IRGNH(base, ctx, 0); /* WB, WA */
200 SET_TTBR1_IRGNL(base, ctx, 1);
201
202 SET_TTBR0_ORGN(base, ctx, 1); /* WB, WA */
203 SET_TTBR1_ORGN(base, ctx, 1); /* WB, WA */
204#endif
205
206 /* Enable the MMU */
207 SET_M(base, ctx, 1);
208}
209
210static int msm_iommu_domain_init(struct iommu_domain *domain)
211{
212 struct msm_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);
213
214 if (!priv)
215 goto fail_nomem;
216
217 INIT_LIST_HEAD(&priv->list_attached);
218 priv->pgtable = (unsigned long *)__get_free_pages(GFP_KERNEL,
219 get_order(SZ_16K));
220
221 if (!priv->pgtable)
222 goto fail_nomem;
223
224 memset(priv->pgtable, 0, SZ_16K);
225 domain->priv = priv;
226 return 0;
227
228fail_nomem:
229 kfree(priv);
230 return -ENOMEM;
231}
232
233static void msm_iommu_domain_destroy(struct iommu_domain *domain)
234{
235 struct msm_priv *priv;
236 unsigned long flags;
237 unsigned long *fl_table;
238 int i;
239
240 spin_lock_irqsave(&msm_iommu_lock, flags);
241 priv = domain->priv;
242 domain->priv = NULL;
243
244 if (priv) {
245 fl_table = priv->pgtable;
246
247 for (i = 0; i < NUM_FL_PTE; i++)
248 if ((fl_table[i] & 0x03) == FL_TYPE_TABLE)
249 free_page((unsigned long) __va(((fl_table[i]) &
250 FL_BASE_MASK)));
251
252 free_pages((unsigned long)priv->pgtable, get_order(SZ_16K));
253 priv->pgtable = NULL;
254 }
255
256 kfree(priv);
257 spin_unlock_irqrestore(&msm_iommu_lock, flags);
258}
259
260static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
261{
262 struct msm_priv *priv;
263 struct msm_iommu_ctx_dev *ctx_dev;
264 struct msm_iommu_drvdata *iommu_drvdata;
265 struct msm_iommu_ctx_drvdata *ctx_drvdata;
266 struct msm_iommu_ctx_drvdata *tmp_drvdata;
267 int ret = 0;
268 unsigned long flags;
269
270 spin_lock_irqsave(&msm_iommu_lock, flags);
271
272 priv = domain->priv;
273
274 if (!priv || !dev) {
275 ret = -EINVAL;
276 goto fail;
277 }
278
279 iommu_drvdata = dev_get_drvdata(dev->parent);
280 ctx_drvdata = dev_get_drvdata(dev);
281 ctx_dev = dev->platform_data;
282
283 if (!iommu_drvdata || !ctx_drvdata || !ctx_dev) {
284 ret = -EINVAL;
285 goto fail;
286 }
287
288 if (!list_empty(&ctx_drvdata->attached_elm)) {
289 ret = -EBUSY;
290 goto fail;
291 }
292
293 list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
294 if (tmp_drvdata == ctx_drvdata) {
295 ret = -EBUSY;
296 goto fail;
297 }
298
299 ret = __enable_clocks(iommu_drvdata);
300 if (ret)
301 goto fail;
302
303 __program_context(iommu_drvdata->base, ctx_dev->num,
304 __pa(priv->pgtable));
305
306 __disable_clocks(iommu_drvdata);
307 list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
308 ret = __flush_iotlb(domain);
309
310fail:
311 spin_unlock_irqrestore(&msm_iommu_lock, flags);
312 return ret;
313}
314
315static void msm_iommu_detach_dev(struct iommu_domain *domain,
316 struct device *dev)
317{
318 struct msm_priv *priv;
319 struct msm_iommu_ctx_dev *ctx_dev;
320 struct msm_iommu_drvdata *iommu_drvdata;
321 struct msm_iommu_ctx_drvdata *ctx_drvdata;
322 unsigned long flags;
323 int ret;
324
325 spin_lock_irqsave(&msm_iommu_lock, flags);
326 priv = domain->priv;
327
328 if (!priv || !dev)
329 goto fail;
330
331 iommu_drvdata = dev_get_drvdata(dev->parent);
332 ctx_drvdata = dev_get_drvdata(dev);
333 ctx_dev = dev->platform_data;
334
335 if (!iommu_drvdata || !ctx_drvdata || !ctx_dev)
336 goto fail;
337
338 ret = __flush_iotlb(domain);
339 if (ret)
340 goto fail;
341
342 ret = __enable_clocks(iommu_drvdata);
343 if (ret)
344 goto fail;
345
346 __reset_context(iommu_drvdata->base, ctx_dev->num);
347 __disable_clocks(iommu_drvdata);
348 list_del_init(&ctx_drvdata->attached_elm);
349
350fail:
351 spin_unlock_irqrestore(&msm_iommu_lock, flags);
352}
353
354static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
355 phys_addr_t pa, int order, int prot)
356{
357 struct msm_priv *priv;
358 unsigned long flags;
359 unsigned long *fl_table;
360 unsigned long *fl_pte;
361 unsigned long fl_offset;
362 unsigned long *sl_table;
363 unsigned long *sl_pte;
364 unsigned long sl_offset;
365 unsigned int pgprot;
366 size_t len = 0x1000UL << order;
367 int ret = 0, tex, sh;
368
369 spin_lock_irqsave(&msm_iommu_lock, flags);
370
371 sh = (prot & MSM_IOMMU_ATTR_SH) ? 1 : 0;
372 tex = msm_iommu_tex_class[prot & MSM_IOMMU_CP_MASK];
373
374 if (tex < 0 || tex > NUM_TEX_CLASS - 1) {
375 ret = -EINVAL;
376 goto fail;
377 }
378
379 priv = domain->priv;
380 if (!priv) {
381 ret = -EINVAL;
382 goto fail;
383 }
384
385 fl_table = priv->pgtable;
386
387 if (len != SZ_16M && len != SZ_1M &&
388 len != SZ_64K && len != SZ_4K) {
389 pr_debug("Bad size: %d\n", len);
390 ret = -EINVAL;
391 goto fail;
392 }
393
394 if (!fl_table) {
395 pr_debug("Null page table\n");
396 ret = -EINVAL;
397 goto fail;
398 }
399
400 if (len == SZ_16M || len == SZ_1M) {
401 pgprot = sh ? FL_SHARED : 0;
402 pgprot |= tex & 0x01 ? FL_BUFFERABLE : 0;
403 pgprot |= tex & 0x02 ? FL_CACHEABLE : 0;
404 pgprot |= tex & 0x04 ? FL_TEX0 : 0;
405 } else {
406 pgprot = sh ? SL_SHARED : 0;
407 pgprot |= tex & 0x01 ? SL_BUFFERABLE : 0;
408 pgprot |= tex & 0x02 ? SL_CACHEABLE : 0;
409 pgprot |= tex & 0x04 ? SL_TEX0 : 0;
410 }
411
412 fl_offset = FL_OFFSET(va); /* Upper 12 bits */
413 fl_pte = fl_table + fl_offset; /* int pointers, 4 bytes */
414
415 if (len == SZ_16M) {
416 int i = 0;
417 for (i = 0; i < 16; i++)
418 *(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION |
419 FL_AP_READ | FL_AP_WRITE | FL_TYPE_SECT |
420 FL_SHARED | FL_NG | pgprot;
421 }
422
423 if (len == SZ_1M)
424 *fl_pte = (pa & 0xFFF00000) | FL_AP_READ | FL_AP_WRITE | FL_NG |
425 FL_TYPE_SECT | FL_SHARED | pgprot;
426
427 /* Need a 2nd level table */
428 if ((len == SZ_4K || len == SZ_64K) && (*fl_pte) == 0) {
429 unsigned long *sl;
430 sl = (unsigned long *) __get_free_pages(GFP_ATOMIC,
431 get_order(SZ_4K));
432
433 if (!sl) {
434 pr_debug("Could not allocate second level table\n");
435 ret = -ENOMEM;
436 goto fail;
437 }
438
439 memset(sl, 0, SZ_4K);
440 *fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) | FL_TYPE_TABLE);
441 }
442
443 sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
444 sl_offset = SL_OFFSET(va);
445 sl_pte = sl_table + sl_offset;
446
447
448 if (len == SZ_4K)
449 *sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_AP0 | SL_AP1 | SL_NG |
450 SL_SHARED | SL_TYPE_SMALL | pgprot;
451
452 if (len == SZ_64K) {
453 int i;
454
455 for (i = 0; i < 16; i++)
456 *(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_AP0 |
457 SL_NG | SL_AP1 | SL_SHARED | SL_TYPE_LARGE | pgprot;
458 }
459
460 ret = __flush_iotlb(domain);
461fail:
462 spin_unlock_irqrestore(&msm_iommu_lock, flags);
463 return ret;
464}
465
466static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
467 int order)
468{
469 struct msm_priv *priv;
470 unsigned long flags;
471 unsigned long *fl_table;
472 unsigned long *fl_pte;
473 unsigned long fl_offset;
474 unsigned long *sl_table;
475 unsigned long *sl_pte;
476 unsigned long sl_offset;
477 size_t len = 0x1000UL << order;
478 int i, ret = 0;
479
480 spin_lock_irqsave(&msm_iommu_lock, flags);
481
482 priv = domain->priv;
483
484 if (!priv) {
485 ret = -ENODEV;
486 goto fail;
487 }
488
489 fl_table = priv->pgtable;
490
491 if (len != SZ_16M && len != SZ_1M &&
492 len != SZ_64K && len != SZ_4K) {
493 pr_debug("Bad length: %d\n", len);
494 ret = -EINVAL;
495 goto fail;
496 }
497
498 if (!fl_table) {
499 pr_debug("Null page table\n");
500 ret = -EINVAL;
501 goto fail;
502 }
503
504 fl_offset = FL_OFFSET(va); /* Upper 12 bits */
505 fl_pte = fl_table + fl_offset; /* int pointers, 4 bytes */
506
507 if (*fl_pte == 0) {
508 pr_debug("First level PTE is 0\n");
509 ret = -ENODEV;
510 goto fail;
511 }
512
513 /* Unmap supersection */
514 if (len == SZ_16M)
515 for (i = 0; i < 16; i++)
516 *(fl_pte+i) = 0;
517
518 if (len == SZ_1M)
519 *fl_pte = 0;
520
521 sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
522 sl_offset = SL_OFFSET(va);
523 sl_pte = sl_table + sl_offset;
524
525 if (len == SZ_64K) {
526 for (i = 0; i < 16; i++)
527 *(sl_pte+i) = 0;
528 }
529
530 if (len == SZ_4K)
531 *sl_pte = 0;
532
533 if (len == SZ_4K || len == SZ_64K) {
534 int used = 0;
535
536 for (i = 0; i < NUM_SL_PTE; i++)
537 if (sl_table[i])
538 used = 1;
539 if (!used) {
540 free_page((unsigned long)sl_table);
541 *fl_pte = 0;
542 }
543 }
544
545 ret = __flush_iotlb(domain);
546fail:
547 spin_unlock_irqrestore(&msm_iommu_lock, flags);
548 return ret;
549}
550
551static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
552 unsigned long va)
553{
554 struct msm_priv *priv;
555 struct msm_iommu_drvdata *iommu_drvdata;
556 struct msm_iommu_ctx_drvdata *ctx_drvdata;
557 unsigned int par;
558 unsigned long flags;
559 void __iomem *base;
560 phys_addr_t ret = 0;
561 int ctx;
562
563 spin_lock_irqsave(&msm_iommu_lock, flags);
564
565 priv = domain->priv;
566 if (list_empty(&priv->list_attached))
567 goto fail;
568
569 ctx_drvdata = list_entry(priv->list_attached.next,
570 struct msm_iommu_ctx_drvdata, attached_elm);
571 iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
572
573 base = iommu_drvdata->base;
574 ctx = ctx_drvdata->num;
575
576 ret = __enable_clocks(iommu_drvdata);
577 if (ret)
578 goto fail;
579
580 /* Invalidate context TLB */
581 SET_CTX_TLBIALL(base, ctx, 0);
582 SET_V2PPR(base, ctx, va & V2Pxx_VA);
583
584 par = GET_PAR(base, ctx);
585
586 /* We are dealing with a supersection */
587 if (GET_NOFAULT_SS(base, ctx))
588 ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
589 else /* Upper 20 bits from PAR, lower 12 from VA */
590 ret = (par & 0xFFFFF000) | (va & 0x00000FFF);
591
592 if (GET_FAULT(base, ctx))
593 ret = 0;
594
595 __disable_clocks(iommu_drvdata);
596fail:
597 spin_unlock_irqrestore(&msm_iommu_lock, flags);
598 return ret;
599}
600
601static int msm_iommu_domain_has_cap(struct iommu_domain *domain,
602 unsigned long cap)
603{
604 return 0;
605}
606
607static void print_ctx_regs(void __iomem *base, int ctx)
608{
609 unsigned int fsr = GET_FSR(base, ctx);
610 pr_err("FAR = %08x PAR = %08x\n",
611 GET_FAR(base, ctx), GET_PAR(base, ctx));
612 pr_err("FSR = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
613 (fsr & 0x02) ? "TF " : "",
614 (fsr & 0x04) ? "AFF " : "",
615 (fsr & 0x08) ? "APF " : "",
616 (fsr & 0x10) ? "TLBMF " : "",
617 (fsr & 0x20) ? "HTWDEEF " : "",
618 (fsr & 0x40) ? "HTWSEEF " : "",
619 (fsr & 0x80) ? "MHF " : "",
620 (fsr & 0x10000) ? "SL " : "",
621 (fsr & 0x40000000) ? "SS " : "",
622 (fsr & 0x80000000) ? "MULTI " : "");
623
624 pr_err("FSYNR0 = %08x FSYNR1 = %08x\n",
625 GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
626 pr_err("TTBR0 = %08x TTBR1 = %08x\n",
627 GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
628 pr_err("SCTLR = %08x ACTLR = %08x\n",
629 GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
630 pr_err("PRRR = %08x NMRR = %08x\n",
631 GET_PRRR(base, ctx), GET_NMRR(base, ctx));
632}
633
634irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
635{
636 struct msm_iommu_drvdata *drvdata = dev_id;
637 void __iomem *base;
638 unsigned int fsr;
639 int i, ret;
640
641 spin_lock(&msm_iommu_lock);
642
643 if (!drvdata) {
644 pr_err("Invalid device ID in context interrupt handler\n");
645 goto fail;
646 }
647
648 base = drvdata->base;
649
650 pr_err("Unexpected IOMMU page fault!\n");
651 pr_err("base = %08x\n", (unsigned int) base);
652
653 ret = __enable_clocks(drvdata);
654 if (ret)
655 goto fail;
656
657 for (i = 0; i < drvdata->ncb; i++) {
658 fsr = GET_FSR(base, i);
659 if (fsr) {
660 pr_err("Fault occurred in context %d.\n", i);
661 pr_err("Interesting registers:\n");
662 print_ctx_regs(base, i);
663 SET_FSR(base, i, 0x4000000F);
664 }
665 }
666 __disable_clocks(drvdata);
667fail:
668 spin_unlock(&msm_iommu_lock);
669 return 0;
670}
671
672static struct iommu_ops msm_iommu_ops = {
673 .domain_init = msm_iommu_domain_init,
674 .domain_destroy = msm_iommu_domain_destroy,
675 .attach_dev = msm_iommu_attach_dev,
676 .detach_dev = msm_iommu_detach_dev,
677 .map = msm_iommu_map,
678 .unmap = msm_iommu_unmap,
679 .iova_to_phys = msm_iommu_iova_to_phys,
680 .domain_has_cap = msm_iommu_domain_has_cap
681};
682
683static int __init get_tex_class(int icp, int ocp, int mt, int nos)
684{
685 int i = 0;
686 unsigned int prrr = 0;
687 unsigned int nmrr = 0;
688 int c_icp, c_ocp, c_mt, c_nos;
689
690 RCP15_PRRR(prrr);
691 RCP15_NMRR(nmrr);
692
693 for (i = 0; i < NUM_TEX_CLASS; i++) {
694 c_nos = PRRR_NOS(prrr, i);
695 c_mt = PRRR_MT(prrr, i);
696 c_icp = NMRR_ICP(nmrr, i);
697 c_ocp = NMRR_OCP(nmrr, i);
698
699 if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos)
700 return i;
701 }
702
703 return -ENODEV;
704}
705
706static void __init setup_iommu_tex_classes(void)
707{
708 msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED] =
709 get_tex_class(CP_NONCACHED, CP_NONCACHED, MT_NORMAL, 1);
710
711 msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_WA] =
712 get_tex_class(CP_WB_WA, CP_WB_WA, MT_NORMAL, 1);
713
714 msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_NWA] =
715 get_tex_class(CP_WB_NWA, CP_WB_NWA, MT_NORMAL, 1);
716
717 msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WT] =
718 get_tex_class(CP_WT, CP_WT, MT_NORMAL, 1);
719}
720
721static int __init msm_iommu_init(void)
722{
723 setup_iommu_tex_classes();
724 register_iommu(&msm_iommu_ops);
725 return 0;
726}
727
728subsys_initcall(msm_iommu_init);
729
730MODULE_LICENSE("GPL v2");
731MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");
diff --git a/arch/arm/mach-msm/iommu_dev.c b/arch/arm/mach-msm/iommu_dev.c
deleted file mode 100644
index 8e8fb079852d..000000000000
--- a/arch/arm/mach-msm/iommu_dev.c
+++ /dev/null
@@ -1,422 +0,0 @@
1/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
15 * 02110-1301, USA.
16 */
17
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/platform_device.h>
23#include <linux/io.h>
24#include <linux/clk.h>
25#include <linux/iommu.h>
26#include <linux/interrupt.h>
27#include <linux/err.h>
28#include <linux/slab.h>
29
30#include <mach/iommu_hw-8xxx.h>
31#include <mach/iommu.h>
32#include <mach/clk.h>
33
34struct iommu_ctx_iter_data {
35 /* input */
36 const char *name;
37
38 /* output */
39 struct device *dev;
40};
41
42static struct platform_device *msm_iommu_root_dev;
43
44static int each_iommu_ctx(struct device *dev, void *data)
45{
46 struct iommu_ctx_iter_data *res = data;
47 struct msm_iommu_ctx_dev *c = dev->platform_data;
48
49 if (!res || !c || !c->name || !res->name)
50 return -EINVAL;
51
52 if (!strcmp(res->name, c->name)) {
53 res->dev = dev;
54 return 1;
55 }
56 return 0;
57}
58
59static int each_iommu(struct device *dev, void *data)
60{
61 return device_for_each_child(dev, data, each_iommu_ctx);
62}
63
64struct device *msm_iommu_get_ctx(const char *ctx_name)
65{
66 struct iommu_ctx_iter_data r;
67 int found;
68
69 if (!msm_iommu_root_dev) {
70 pr_err("No root IOMMU device.\n");
71 goto fail;
72 }
73
74 r.name = ctx_name;
75 found = device_for_each_child(&msm_iommu_root_dev->dev, &r, each_iommu);
76
77 if (!found) {
78 pr_err("Could not find context <%s>\n", ctx_name);
79 goto fail;
80 }
81
82 return r.dev;
83fail:
84 return NULL;
85}
86EXPORT_SYMBOL(msm_iommu_get_ctx);
87
88static void msm_iommu_reset(void __iomem *base, int ncb)
89{
90 int ctx;
91
92 SET_RPUE(base, 0);
93 SET_RPUEIE(base, 0);
94 SET_ESRRESTORE(base, 0);
95 SET_TBE(base, 0);
96 SET_CR(base, 0);
97 SET_SPDMBE(base, 0);
98 SET_TESTBUSCR(base, 0);
99 SET_TLBRSW(base, 0);
100 SET_GLOBAL_TLBIALL(base, 0);
101 SET_RPU_ACR(base, 0);
102 SET_TLBLKCRWE(base, 1);
103
104 for (ctx = 0; ctx < ncb; ctx++) {
105 SET_BPRCOSH(base, ctx, 0);
106 SET_BPRCISH(base, ctx, 0);
107 SET_BPRCNSH(base, ctx, 0);
108 SET_BPSHCFG(base, ctx, 0);
109 SET_BPMTCFG(base, ctx, 0);
110 SET_ACTLR(base, ctx, 0);
111 SET_SCTLR(base, ctx, 0);
112 SET_FSRRESTORE(base, ctx, 0);
113 SET_TTBR0(base, ctx, 0);
114 SET_TTBR1(base, ctx, 0);
115 SET_TTBCR(base, ctx, 0);
116 SET_BFBCR(base, ctx, 0);
117 SET_PAR(base, ctx, 0);
118 SET_FAR(base, ctx, 0);
119 SET_CTX_TLBIALL(base, ctx, 0);
120 SET_TLBFLPTER(base, ctx, 0);
121 SET_TLBSLPTER(base, ctx, 0);
122 SET_TLBLKCR(base, ctx, 0);
123 SET_PRRR(base, ctx, 0);
124 SET_NMRR(base, ctx, 0);
125 SET_CONTEXTIDR(base, ctx, 0);
126 }
127}
128
129static int msm_iommu_probe(struct platform_device *pdev)
130{
131 struct resource *r, *r2;
132 struct clk *iommu_clk;
133 struct clk *iommu_pclk;
134 struct msm_iommu_drvdata *drvdata;
135 struct msm_iommu_dev *iommu_dev = pdev->dev.platform_data;
136 void __iomem *regs_base;
137 resource_size_t len;
138 int ret, irq, par;
139
140 if (pdev->id == -1) {
141 msm_iommu_root_dev = pdev;
142 return 0;
143 }
144
145 drvdata = kzalloc(sizeof(*drvdata), GFP_KERNEL);
146
147 if (!drvdata) {
148 ret = -ENOMEM;
149 goto fail;
150 }
151
152 if (!iommu_dev) {
153 ret = -ENODEV;
154 goto fail;
155 }
156
157 iommu_pclk = clk_get(NULL, "smmu_pclk");
158 if (IS_ERR(iommu_pclk)) {
159 ret = -ENODEV;
160 goto fail;
161 }
162
163 ret = clk_enable(iommu_pclk);
164 if (ret)
165 goto fail_enable;
166
167 iommu_clk = clk_get(&pdev->dev, "iommu_clk");
168
169 if (!IS_ERR(iommu_clk)) {
170 if (clk_get_rate(iommu_clk) == 0)
171 clk_set_min_rate(iommu_clk, 1);
172
173 ret = clk_enable(iommu_clk);
174 if (ret) {
175 clk_put(iommu_clk);
176 goto fail_pclk;
177 }
178 } else
179 iommu_clk = NULL;
180
181 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "physbase");
182
183 if (!r) {
184 ret = -ENODEV;
185 goto fail_clk;
186 }
187
188 len = resource_size(r);
189
190 r2 = request_mem_region(r->start, len, r->name);
191 if (!r2) {
192 pr_err("Could not request memory region: start=%p, len=%d\n",
193 (void *) r->start, len);
194 ret = -EBUSY;
195 goto fail_clk;
196 }
197
198 regs_base = ioremap(r2->start, len);
199
200 if (!regs_base) {
201 pr_err("Could not ioremap: start=%p, len=%d\n",
202 (void *) r2->start, len);
203 ret = -EBUSY;
204 goto fail_mem;
205 }
206
207 irq = platform_get_irq_byname(pdev, "secure_irq");
208 if (irq < 0) {
209 ret = -ENODEV;
210 goto fail_io;
211 }
212
213 msm_iommu_reset(regs_base, iommu_dev->ncb);
214
215 SET_M(regs_base, 0, 1);
216 SET_PAR(regs_base, 0, 0);
217 SET_V2PCFG(regs_base, 0, 1);
218 SET_V2PPR(regs_base, 0, 0);
219 par = GET_PAR(regs_base, 0);
220 SET_V2PCFG(regs_base, 0, 0);
221 SET_M(regs_base, 0, 0);
222
223 if (!par) {
224 pr_err("%s: Invalid PAR value detected\n", iommu_dev->name);
225 ret = -ENODEV;
226 goto fail_io;
227 }
228
229 ret = request_irq(irq, msm_iommu_fault_handler, 0,
230 "msm_iommu_secure_irpt_handler", drvdata);
231 if (ret) {
232 pr_err("Request IRQ %d failed with ret=%d\n", irq, ret);
233 goto fail_io;
234 }
235
236
237 drvdata->pclk = iommu_pclk;
238 drvdata->clk = iommu_clk;
239 drvdata->base = regs_base;
240 drvdata->irq = irq;
241 drvdata->ncb = iommu_dev->ncb;
242
243 pr_info("device %s mapped at %p, irq %d with %d ctx banks\n",
244 iommu_dev->name, regs_base, irq, iommu_dev->ncb);
245
246 platform_set_drvdata(pdev, drvdata);
247
248 if (iommu_clk)
249 clk_disable(iommu_clk);
250
251 clk_disable(iommu_pclk);
252
253 return 0;
254fail_io:
255 iounmap(regs_base);
256fail_mem:
257 release_mem_region(r->start, len);
258fail_clk:
259 if (iommu_clk) {
260 clk_disable(iommu_clk);
261 clk_put(iommu_clk);
262 }
263fail_pclk:
264 clk_disable(iommu_pclk);
265fail_enable:
266 clk_put(iommu_pclk);
267fail:
268 kfree(drvdata);
269 return ret;
270}
271
272static int msm_iommu_remove(struct platform_device *pdev)
273{
274 struct msm_iommu_drvdata *drv = NULL;
275
276 drv = platform_get_drvdata(pdev);
277 if (drv) {
278 if (drv->clk)
279 clk_put(drv->clk);
280 clk_put(drv->pclk);
281 memset(drv, 0, sizeof(*drv));
282 kfree(drv);
283 platform_set_drvdata(pdev, NULL);
284 }
285 return 0;
286}
287
288static int msm_iommu_ctx_probe(struct platform_device *pdev)
289{
290 struct msm_iommu_ctx_dev *c = pdev->dev.platform_data;
291 struct msm_iommu_drvdata *drvdata;
292 struct msm_iommu_ctx_drvdata *ctx_drvdata = NULL;
293 int i, ret;
294 if (!c || !pdev->dev.parent) {
295 ret = -EINVAL;
296 goto fail;
297 }
298
299 drvdata = dev_get_drvdata(pdev->dev.parent);
300
301 if (!drvdata) {
302 ret = -ENODEV;
303 goto fail;
304 }
305
306 ctx_drvdata = kzalloc(sizeof(*ctx_drvdata), GFP_KERNEL);
307 if (!ctx_drvdata) {
308 ret = -ENOMEM;
309 goto fail;
310 }
311 ctx_drvdata->num = c->num;
312 ctx_drvdata->pdev = pdev;
313
314 INIT_LIST_HEAD(&ctx_drvdata->attached_elm);
315 platform_set_drvdata(pdev, ctx_drvdata);
316
317 ret = clk_enable(drvdata->pclk);
318 if (ret)
319 goto fail;
320
321 if (drvdata->clk) {
322 ret = clk_enable(drvdata->clk);
323 if (ret) {
324 clk_disable(drvdata->pclk);
325 goto fail;
326 }
327 }
328
329 /* Program the M2V tables for this context */
330 for (i = 0; i < MAX_NUM_MIDS; i++) {
331 int mid = c->mids[i];
332 if (mid == -1)
333 break;
334
335 SET_M2VCBR_N(drvdata->base, mid, 0);
336 SET_CBACR_N(drvdata->base, c->num, 0);
337
338 /* Set VMID = 0 */
339 SET_VMID(drvdata->base, mid, 0);
340
341 /* Set the context number for that MID to this context */
342 SET_CBNDX(drvdata->base, mid, c->num);
343
344 /* Set MID associated with this context bank to 0*/
345 SET_CBVMID(drvdata->base, c->num, 0);
346
347 /* Set the ASID for TLB tagging for this context */
348 SET_CONTEXTIDR_ASID(drvdata->base, c->num, c->num);
349
350 /* Set security bit override to be Non-secure */
351 SET_NSCFG(drvdata->base, mid, 3);
352 }
353
354 if (drvdata->clk)
355 clk_disable(drvdata->clk);
356 clk_disable(drvdata->pclk);
357
358 dev_info(&pdev->dev, "context %s using bank %d\n", c->name, c->num);
359 return 0;
360fail:
361 kfree(ctx_drvdata);
362 return ret;
363}
364
365static int msm_iommu_ctx_remove(struct platform_device *pdev)
366{
367 struct msm_iommu_ctx_drvdata *drv = NULL;
368 drv = platform_get_drvdata(pdev);
369 if (drv) {
370 memset(drv, 0, sizeof(struct msm_iommu_ctx_drvdata));
371 kfree(drv);
372 platform_set_drvdata(pdev, NULL);
373 }
374 return 0;
375}
376
377static struct platform_driver msm_iommu_driver = {
378 .driver = {
379 .name = "msm_iommu",
380 },
381 .probe = msm_iommu_probe,
382 .remove = msm_iommu_remove,
383};
384
385static struct platform_driver msm_iommu_ctx_driver = {
386 .driver = {
387 .name = "msm_iommu_ctx",
388 },
389 .probe = msm_iommu_ctx_probe,
390 .remove = msm_iommu_ctx_remove,
391};
392
393static int __init msm_iommu_driver_init(void)
394{
395 int ret;
396 ret = platform_driver_register(&msm_iommu_driver);
397 if (ret != 0) {
398 pr_err("Failed to register IOMMU driver\n");
399 goto error;
400 }
401
402 ret = platform_driver_register(&msm_iommu_ctx_driver);
403 if (ret != 0) {
404 pr_err("Failed to register IOMMU context driver\n");
405 goto error;
406 }
407
408error:
409 return ret;
410}
411
412static void __exit msm_iommu_driver_exit(void)
413{
414 platform_driver_unregister(&msm_iommu_ctx_driver);
415 platform_driver_unregister(&msm_iommu_driver);
416}
417
418subsys_initcall(msm_iommu_driver_init);
419module_exit(msm_iommu_driver_exit);
420
421MODULE_LICENSE("GPL v2");
422MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");
diff --git a/arch/arm/mach-mx5/board-cpuimx51.c b/arch/arm/mach-mx5/board-cpuimx51.c
index 4efa02ee1639..add0d42de7af 100644
--- a/arch/arm/mach-mx5/board-cpuimx51.c
+++ b/arch/arm/mach-mx5/board-cpuimx51.c
@@ -245,6 +245,8 @@ __setup("otg_mode=", eukrea_cpuimx51_otg_mode);
245 */ 245 */
246static void __init eukrea_cpuimx51_init(void) 246static void __init eukrea_cpuimx51_init(void)
247{ 247{
248 imx51_soc_init();
249
248 mxc_iomux_v3_setup_multiple_pads(eukrea_cpuimx51_pads, 250 mxc_iomux_v3_setup_multiple_pads(eukrea_cpuimx51_pads,
249 ARRAY_SIZE(eukrea_cpuimx51_pads)); 251 ARRAY_SIZE(eukrea_cpuimx51_pads));
250 252
diff --git a/arch/arm/mach-mx5/board-cpuimx51sd.c b/arch/arm/mach-mx5/board-cpuimx51sd.c
index 5ef25a596143..ff096d587299 100644
--- a/arch/arm/mach-mx5/board-cpuimx51sd.c
+++ b/arch/arm/mach-mx5/board-cpuimx51sd.c
@@ -264,6 +264,8 @@ static struct platform_device *platform_devices[] __initdata = {
264 264
265static void __init eukrea_cpuimx51sd_init(void) 265static void __init eukrea_cpuimx51sd_init(void)
266{ 266{
267 imx51_soc_init();
268
267 mxc_iomux_v3_setup_multiple_pads(eukrea_cpuimx51sd_pads, 269 mxc_iomux_v3_setup_multiple_pads(eukrea_cpuimx51sd_pads,
268 ARRAY_SIZE(eukrea_cpuimx51sd_pads)); 270 ARRAY_SIZE(eukrea_cpuimx51sd_pads));
269 271
diff --git a/arch/arm/mach-mx5/board-mx50_rdp.c b/arch/arm/mach-mx5/board-mx50_rdp.c
index 11210e1ae42a..7de25c6712eb 100644
--- a/arch/arm/mach-mx5/board-mx50_rdp.c
+++ b/arch/arm/mach-mx5/board-mx50_rdp.c
@@ -192,6 +192,8 @@ static const struct imxi2c_platform_data i2c_data __initconst = {
192 */ 192 */
193static void __init mx50_rdp_board_init(void) 193static void __init mx50_rdp_board_init(void)
194{ 194{
195 imx50_soc_init();
196
195 mxc_iomux_v3_setup_multiple_pads(mx50_rdp_pads, 197 mxc_iomux_v3_setup_multiple_pads(mx50_rdp_pads,
196 ARRAY_SIZE(mx50_rdp_pads)); 198 ARRAY_SIZE(mx50_rdp_pads));
197 199
diff --git a/arch/arm/mach-mx5/board-mx51_3ds.c b/arch/arm/mach-mx5/board-mx51_3ds.c
index 63dfbeafbc1e..3112d15feebc 100644
--- a/arch/arm/mach-mx5/board-mx51_3ds.c
+++ b/arch/arm/mach-mx5/board-mx51_3ds.c
@@ -135,6 +135,8 @@ static struct spi_board_info mx51_3ds_spi_nor_device[] = {
135 */ 135 */
136static void __init mx51_3ds_init(void) 136static void __init mx51_3ds_init(void)
137{ 137{
138 imx51_soc_init();
139
138 mxc_iomux_v3_setup_multiple_pads(mx51_3ds_pads, 140 mxc_iomux_v3_setup_multiple_pads(mx51_3ds_pads,
139 ARRAY_SIZE(mx51_3ds_pads)); 141 ARRAY_SIZE(mx51_3ds_pads));
140 142
diff --git a/arch/arm/mach-mx5/board-mx51_babbage.c b/arch/arm/mach-mx5/board-mx51_babbage.c
index c7b3fabf50f9..6021dd00ec75 100644
--- a/arch/arm/mach-mx5/board-mx51_babbage.c
+++ b/arch/arm/mach-mx5/board-mx51_babbage.c
@@ -340,6 +340,8 @@ static void __init mx51_babbage_init(void)
340 iomux_v3_cfg_t power_key = _MX51_PAD_EIM_A27__GPIO2_21 | 340 iomux_v3_cfg_t power_key = _MX51_PAD_EIM_A27__GPIO2_21 |
341 MUX_PAD_CTRL(PAD_CTL_SRE_FAST | PAD_CTL_DSE_HIGH | PAD_CTL_PUS_100K_UP); 341 MUX_PAD_CTRL(PAD_CTL_SRE_FAST | PAD_CTL_DSE_HIGH | PAD_CTL_PUS_100K_UP);
342 342
343 imx51_soc_init();
344
343#if defined(CONFIG_CPU_FREQ_IMX) 345#if defined(CONFIG_CPU_FREQ_IMX)
344 get_cpu_op = mx51_get_cpu_op; 346 get_cpu_op = mx51_get_cpu_op;
345#endif 347#endif
diff --git a/arch/arm/mach-mx5/board-mx51_efikamx.c b/arch/arm/mach-mx5/board-mx51_efikamx.c
index 6e362315291b..3be603b9075a 100644
--- a/arch/arm/mach-mx5/board-mx51_efikamx.c
+++ b/arch/arm/mach-mx5/board-mx51_efikamx.c
@@ -236,6 +236,8 @@ late_initcall(mx51_efikamx_power_init);
236 236
237static void __init mx51_efikamx_init(void) 237static void __init mx51_efikamx_init(void)
238{ 238{
239 imx51_soc_init();
240
239 mxc_iomux_v3_setup_multiple_pads(mx51efikamx_pads, 241 mxc_iomux_v3_setup_multiple_pads(mx51efikamx_pads,
240 ARRAY_SIZE(mx51efikamx_pads)); 242 ARRAY_SIZE(mx51efikamx_pads));
241 efika_board_common_init(); 243 efika_board_common_init();
diff --git a/arch/arm/mach-mx5/board-mx51_efikasb.c b/arch/arm/mach-mx5/board-mx51_efikasb.c
index 474fc6e4c6df..4b2e522de0f8 100644
--- a/arch/arm/mach-mx5/board-mx51_efikasb.c
+++ b/arch/arm/mach-mx5/board-mx51_efikasb.c
@@ -248,6 +248,8 @@ static void __init mx51_efikasb_board_id(void)
248 248
249static void __init efikasb_board_init(void) 249static void __init efikasb_board_init(void)
250{ 250{
251 imx51_soc_init();
252
251 mxc_iomux_v3_setup_multiple_pads(mx51efikasb_pads, 253 mxc_iomux_v3_setup_multiple_pads(mx51efikasb_pads,
252 ARRAY_SIZE(mx51efikasb_pads)); 254 ARRAY_SIZE(mx51efikasb_pads));
253 efika_board_common_init(); 255 efika_board_common_init();
diff --git a/arch/arm/mach-mx5/board-mx53_evk.c b/arch/arm/mach-mx5/board-mx53_evk.c
index f87d571882c6..0d9218a6e2d2 100644
--- a/arch/arm/mach-mx5/board-mx53_evk.c
+++ b/arch/arm/mach-mx5/board-mx53_evk.c
@@ -117,6 +117,8 @@ static const struct spi_imx_master mx53_evk_spi_data __initconst = {
117 117
118static void __init mx53_evk_board_init(void) 118static void __init mx53_evk_board_init(void)
119{ 119{
120 imx53_soc_init();
121
120 mxc_iomux_v3_setup_multiple_pads(mx53_evk_pads, 122 mxc_iomux_v3_setup_multiple_pads(mx53_evk_pads,
121 ARRAY_SIZE(mx53_evk_pads)); 123 ARRAY_SIZE(mx53_evk_pads));
122 mx53_evk_init_uart(); 124 mx53_evk_init_uart();
diff --git a/arch/arm/mach-mx5/board-mx53_loco.c b/arch/arm/mach-mx5/board-mx53_loco.c
index 1b947e8c9c0c..359c3e248add 100644
--- a/arch/arm/mach-mx5/board-mx53_loco.c
+++ b/arch/arm/mach-mx5/board-mx53_loco.c
@@ -227,6 +227,8 @@ static const struct imxi2c_platform_data mx53_loco_i2c_data __initconst = {
227 227
228static void __init mx53_loco_board_init(void) 228static void __init mx53_loco_board_init(void)
229{ 229{
230 imx53_soc_init();
231
230 mxc_iomux_v3_setup_multiple_pads(mx53_loco_pads, 232 mxc_iomux_v3_setup_multiple_pads(mx53_loco_pads,
231 ARRAY_SIZE(mx53_loco_pads)); 233 ARRAY_SIZE(mx53_loco_pads));
232 imx53_add_imx_uart(0, NULL); 234 imx53_add_imx_uart(0, NULL);
diff --git a/arch/arm/mach-mx5/board-mx53_smd.c b/arch/arm/mach-mx5/board-mx53_smd.c
index 817c08938f55..bc02894eafef 100644
--- a/arch/arm/mach-mx5/board-mx53_smd.c
+++ b/arch/arm/mach-mx5/board-mx53_smd.c
@@ -113,6 +113,8 @@ static const struct imxi2c_platform_data mx53_smd_i2c_data __initconst = {
113 113
114static void __init mx53_smd_board_init(void) 114static void __init mx53_smd_board_init(void)
115{ 115{
116 imx53_soc_init();
117
116 mxc_iomux_v3_setup_multiple_pads(mx53_smd_pads, 118 mxc_iomux_v3_setup_multiple_pads(mx53_smd_pads,
117 ARRAY_SIZE(mx53_smd_pads)); 119 ARRAY_SIZE(mx53_smd_pads));
118 mx53_smd_init_uart(); 120 mx53_smd_init_uart();
diff --git a/arch/arm/mach-mx5/clock-mx51-mx53.c b/arch/arm/mach-mx5/clock-mx51-mx53.c
index 6b89c1bf4eb2..cd79e3435e28 100644
--- a/arch/arm/mach-mx5/clock-mx51-mx53.c
+++ b/arch/arm/mach-mx5/clock-mx51-mx53.c
@@ -1442,7 +1442,8 @@ static struct clk_lookup mx51_lookups[] = {
1442 _REGISTER_CLOCK(NULL, "gpt_32k", gpt_32k_clk) 1442 _REGISTER_CLOCK(NULL, "gpt_32k", gpt_32k_clk)
1443 _REGISTER_CLOCK("imx51-ecspi.0", NULL, ecspi1_clk) 1443 _REGISTER_CLOCK("imx51-ecspi.0", NULL, ecspi1_clk)
1444 _REGISTER_CLOCK("imx51-ecspi.1", NULL, ecspi2_clk) 1444 _REGISTER_CLOCK("imx51-ecspi.1", NULL, ecspi2_clk)
1445 _REGISTER_CLOCK("imx51-cspi.0", NULL, cspi_clk) 1445 /* i.mx51 has the i.mx35 type cspi */
1446 _REGISTER_CLOCK("imx35-cspi.0", NULL, cspi_clk)
1446 _REGISTER_CLOCK("sdhci-esdhc-imx.0", NULL, esdhc1_clk) 1447 _REGISTER_CLOCK("sdhci-esdhc-imx.0", NULL, esdhc1_clk)
1447 _REGISTER_CLOCK("sdhci-esdhc-imx.1", NULL, esdhc2_clk) 1448 _REGISTER_CLOCK("sdhci-esdhc-imx.1", NULL, esdhc2_clk)
1448 _REGISTER_CLOCK("sdhci-esdhc-imx.2", NULL, esdhc3_clk) 1449 _REGISTER_CLOCK("sdhci-esdhc-imx.2", NULL, esdhc3_clk)
@@ -1471,9 +1472,11 @@ static struct clk_lookup mx53_lookups[] = {
1471 _REGISTER_CLOCK("sdhci-esdhc-imx.1", NULL, esdhc2_mx53_clk) 1472 _REGISTER_CLOCK("sdhci-esdhc-imx.1", NULL, esdhc2_mx53_clk)
1472 _REGISTER_CLOCK("sdhci-esdhc-imx.2", NULL, esdhc3_mx53_clk) 1473 _REGISTER_CLOCK("sdhci-esdhc-imx.2", NULL, esdhc3_mx53_clk)
1473 _REGISTER_CLOCK("sdhci-esdhc-imx.3", NULL, esdhc4_mx53_clk) 1474 _REGISTER_CLOCK("sdhci-esdhc-imx.3", NULL, esdhc4_mx53_clk)
1474 _REGISTER_CLOCK("imx53-ecspi.0", NULL, ecspi1_clk) 1475 /* i.mx53 has the i.mx51 type ecspi */
1475 _REGISTER_CLOCK("imx53-ecspi.1", NULL, ecspi2_clk) 1476 _REGISTER_CLOCK("imx51-ecspi.0", NULL, ecspi1_clk)
1476 _REGISTER_CLOCK("imx53-cspi.0", NULL, cspi_clk) 1477 _REGISTER_CLOCK("imx51-ecspi.1", NULL, ecspi2_clk)
1478 /* i.mx53 has the i.mx25 type cspi */
1479 _REGISTER_CLOCK("imx35-cspi.0", NULL, cspi_clk)
1477 _REGISTER_CLOCK("imx2-wdt.0", NULL, dummy_clk) 1480 _REGISTER_CLOCK("imx2-wdt.0", NULL, dummy_clk)
1478 _REGISTER_CLOCK("imx2-wdt.1", NULL, dummy_clk) 1481 _REGISTER_CLOCK("imx2-wdt.1", NULL, dummy_clk)
1479}; 1482};
diff --git a/arch/arm/mach-mx5/devices.c b/arch/arm/mach-mx5/devices.c
index 153ada53e575..371ca8c8414c 100644
--- a/arch/arm/mach-mx5/devices.c
+++ b/arch/arm/mach-mx5/devices.c
@@ -12,7 +12,6 @@
12 12
13#include <linux/platform_device.h> 13#include <linux/platform_device.h>
14#include <linux/dma-mapping.h> 14#include <linux/dma-mapping.h>
15#include <linux/gpio.h>
16#include <mach/hardware.h> 15#include <mach/hardware.h>
17#include <mach/imx-uart.h> 16#include <mach/imx-uart.h>
18#include <mach/irqs.h> 17#include <mach/irqs.h>
@@ -119,66 +118,3 @@ struct platform_device mxc_usbh2_device = {
119 .coherent_dma_mask = DMA_BIT_MASK(32), 118 .coherent_dma_mask = DMA_BIT_MASK(32),
120 }, 119 },
121}; 120};
122
123static struct mxc_gpio_port mxc_gpio_ports[] = {
124 {
125 .chip.label = "gpio-0",
126 .base = MX51_IO_ADDRESS(MX51_GPIO1_BASE_ADDR),
127 .irq = MX51_MXC_INT_GPIO1_LOW,
128 .irq_high = MX51_MXC_INT_GPIO1_HIGH,
129 .virtual_irq_start = MXC_GPIO_IRQ_START
130 },
131 {
132 .chip.label = "gpio-1",
133 .base = MX51_IO_ADDRESS(MX51_GPIO2_BASE_ADDR),
134 .irq = MX51_MXC_INT_GPIO2_LOW,
135 .irq_high = MX51_MXC_INT_GPIO2_HIGH,
136 .virtual_irq_start = MXC_GPIO_IRQ_START + 32 * 1
137 },
138 {
139 .chip.label = "gpio-2",
140 .base = MX51_IO_ADDRESS(MX51_GPIO3_BASE_ADDR),
141 .irq = MX51_MXC_INT_GPIO3_LOW,
142 .irq_high = MX51_MXC_INT_GPIO3_HIGH,
143 .virtual_irq_start = MXC_GPIO_IRQ_START + 32 * 2
144 },
145 {
146 .chip.label = "gpio-3",
147 .base = MX51_IO_ADDRESS(MX51_GPIO4_BASE_ADDR),
148 .irq = MX51_MXC_INT_GPIO4_LOW,
149 .irq_high = MX51_MXC_INT_GPIO4_HIGH,
150 .virtual_irq_start = MXC_GPIO_IRQ_START + 32 * 3
151 },
152 {
153 .chip.label = "gpio-4",
154 .base = MX53_IO_ADDRESS(MX53_GPIO5_BASE_ADDR),
155 .irq = MX53_INT_GPIO5_LOW,
156 .irq_high = MX53_INT_GPIO5_HIGH,
157 .virtual_irq_start = MXC_GPIO_IRQ_START + 32 * 4
158 },
159 {
160 .chip.label = "gpio-5",
161 .base = MX53_IO_ADDRESS(MX53_GPIO6_BASE_ADDR),
162 .irq = MX53_INT_GPIO6_LOW,
163 .irq_high = MX53_INT_GPIO6_HIGH,
164 .virtual_irq_start = MXC_GPIO_IRQ_START + 32 * 5
165 },
166 {
167 .chip.label = "gpio-6",
168 .base = MX53_IO_ADDRESS(MX53_GPIO7_BASE_ADDR),
169 .irq = MX53_INT_GPIO7_LOW,
170 .irq_high = MX53_INT_GPIO7_HIGH,
171 .virtual_irq_start = MXC_GPIO_IRQ_START + 32 * 6
172 },
173};
174
175int __init imx51_register_gpios(void)
176{
177 return mxc_gpio_init(mxc_gpio_ports, 4);
178}
179
180int __init imx53_register_gpios(void)
181{
182 return mxc_gpio_init(mxc_gpio_ports, ARRAY_SIZE(mxc_gpio_ports));
183}
184
diff --git a/arch/arm/mach-mx5/mm-mx50.c b/arch/arm/mach-mx5/mm-mx50.c
index b9c363b514a9..77e374c726fa 100644
--- a/arch/arm/mach-mx5/mm-mx50.c
+++ b/arch/arm/mach-mx5/mm-mx50.c
@@ -26,7 +26,6 @@
26#include <mach/hardware.h> 26#include <mach/hardware.h>
27#include <mach/common.h> 27#include <mach/common.h>
28#include <mach/iomux-v3.h> 28#include <mach/iomux-v3.h>
29#include <mach/gpio.h>
30#include <mach/irqs.h> 29#include <mach/irqs.h>
31 30
32/* 31/*
@@ -56,17 +55,18 @@ void __init imx50_init_early(void)
56 mxc_arch_reset_init(MX50_IO_ADDRESS(MX50_WDOG_BASE_ADDR)); 55 mxc_arch_reset_init(MX50_IO_ADDRESS(MX50_WDOG_BASE_ADDR));
57} 56}
58 57
59static struct mxc_gpio_port imx50_gpio_ports[] = {
60 DEFINE_IMX_GPIO_PORT_IRQ_HIGH(MX50, 0, 1, MX50_INT_GPIO1_LOW, MX50_INT_GPIO1_HIGH),
61 DEFINE_IMX_GPIO_PORT_IRQ_HIGH(MX50, 1, 2, MX50_INT_GPIO2_LOW, MX50_INT_GPIO2_HIGH),
62 DEFINE_IMX_GPIO_PORT_IRQ_HIGH(MX50, 2, 3, MX50_INT_GPIO3_LOW, MX50_INT_GPIO3_HIGH),
63 DEFINE_IMX_GPIO_PORT_IRQ_HIGH(MX50, 3, 4, MX50_INT_GPIO3_LOW, MX50_INT_GPIO3_HIGH),
64 DEFINE_IMX_GPIO_PORT_IRQ_HIGH(MX50, 4, 5, MX50_INT_GPIO3_LOW, MX50_INT_GPIO3_HIGH),
65 DEFINE_IMX_GPIO_PORT_IRQ_HIGH(MX50, 5, 6, MX50_INT_GPIO3_LOW, MX50_INT_GPIO3_HIGH),
66};
67
68void __init mx50_init_irq(void) 58void __init mx50_init_irq(void)
69{ 59{
70 tzic_init_irq(MX50_IO_ADDRESS(MX50_TZIC_BASE_ADDR)); 60 tzic_init_irq(MX50_IO_ADDRESS(MX50_TZIC_BASE_ADDR));
71 mxc_gpio_init(imx50_gpio_ports, ARRAY_SIZE(imx50_gpio_ports)); 61}
62
63void __init imx50_soc_init(void)
64{
65 /* i.mx50 has the i.mx31 type gpio */
66 mxc_register_gpio("imx31-gpio", 0, MX50_GPIO1_BASE_ADDR, SZ_16K, MX50_INT_GPIO1_LOW, MX50_INT_GPIO1_HIGH);
67 mxc_register_gpio("imx31-gpio", 1, MX50_GPIO2_BASE_ADDR, SZ_16K, MX50_INT_GPIO2_LOW, MX50_INT_GPIO2_HIGH);
68 mxc_register_gpio("imx31-gpio", 2, MX50_GPIO3_BASE_ADDR, SZ_16K, MX50_INT_GPIO3_LOW, MX50_INT_GPIO3_HIGH);
69 mxc_register_gpio("imx31-gpio", 3, MX50_GPIO4_BASE_ADDR, SZ_16K, MX50_INT_GPIO4_LOW, MX50_INT_GPIO4_HIGH);
70 mxc_register_gpio("imx31-gpio", 4, MX50_GPIO5_BASE_ADDR, SZ_16K, MX50_INT_GPIO5_LOW, MX50_INT_GPIO5_HIGH);
71 mxc_register_gpio("imx31-gpio", 5, MX50_GPIO6_BASE_ADDR, SZ_16K, MX50_INT_GPIO6_LOW, MX50_INT_GPIO6_HIGH);
72} 72}
diff --git a/arch/arm/mach-mx5/mm.c b/arch/arm/mach-mx5/mm.c
index ff557301b42b..665843d6c2b2 100644
--- a/arch/arm/mach-mx5/mm.c
+++ b/arch/arm/mach-mx5/mm.c
@@ -69,8 +69,6 @@ void __init imx53_init_early(void)
69 mxc_arch_reset_init(MX53_IO_ADDRESS(MX53_WDOG1_BASE_ADDR)); 69 mxc_arch_reset_init(MX53_IO_ADDRESS(MX53_WDOG1_BASE_ADDR));
70} 70}
71 71
72int imx51_register_gpios(void);
73
74void __init mx51_init_irq(void) 72void __init mx51_init_irq(void)
75{ 73{
76 unsigned long tzic_addr; 74 unsigned long tzic_addr;
@@ -86,11 +84,8 @@ void __init mx51_init_irq(void)
86 panic("unable to map TZIC interrupt controller\n"); 84 panic("unable to map TZIC interrupt controller\n");
87 85
88 tzic_init_irq(tzic_virt); 86 tzic_init_irq(tzic_virt);
89 imx51_register_gpios();
90} 87}
91 88
92int imx53_register_gpios(void);
93
94void __init mx53_init_irq(void) 89void __init mx53_init_irq(void)
95{ 90{
96 unsigned long tzic_addr; 91 unsigned long tzic_addr;
@@ -103,5 +98,25 @@ void __init mx53_init_irq(void)
103 panic("unable to map TZIC interrupt controller\n"); 98 panic("unable to map TZIC interrupt controller\n");
104 99
105 tzic_init_irq(tzic_virt); 100 tzic_init_irq(tzic_virt);
106 imx53_register_gpios(); 101}
102
103void __init imx51_soc_init(void)
104{
105 /* i.mx51 has the i.mx31 type gpio */
106 mxc_register_gpio("imx31-gpio", 0, MX51_GPIO1_BASE_ADDR, SZ_16K, MX51_MXC_INT_GPIO1_LOW, MX51_MXC_INT_GPIO1_HIGH);
107 mxc_register_gpio("imx31-gpio", 1, MX51_GPIO2_BASE_ADDR, SZ_16K, MX51_MXC_INT_GPIO2_LOW, MX51_MXC_INT_GPIO2_HIGH);
108 mxc_register_gpio("imx31-gpio", 2, MX51_GPIO3_BASE_ADDR, SZ_16K, MX51_MXC_INT_GPIO3_LOW, MX51_MXC_INT_GPIO3_HIGH);
109 mxc_register_gpio("imx31-gpio", 3, MX51_GPIO4_BASE_ADDR, SZ_16K, MX51_MXC_INT_GPIO4_LOW, MX51_MXC_INT_GPIO4_HIGH);
110}
111
112void __init imx53_soc_init(void)
113{
114 /* i.mx53 has the i.mx31 type gpio */
115 mxc_register_gpio("imx31-gpio", 0, MX53_GPIO1_BASE_ADDR, SZ_16K, MX53_INT_GPIO1_LOW, MX53_INT_GPIO1_HIGH);
116 mxc_register_gpio("imx31-gpio", 1, MX53_GPIO2_BASE_ADDR, SZ_16K, MX53_INT_GPIO2_LOW, MX53_INT_GPIO2_HIGH);
117 mxc_register_gpio("imx31-gpio", 2, MX53_GPIO3_BASE_ADDR, SZ_16K, MX53_INT_GPIO3_LOW, MX53_INT_GPIO3_HIGH);
118 mxc_register_gpio("imx31-gpio", 3, MX53_GPIO4_BASE_ADDR, SZ_16K, MX53_INT_GPIO4_LOW, MX53_INT_GPIO4_HIGH);
119 mxc_register_gpio("imx31-gpio", 4, MX53_GPIO5_BASE_ADDR, SZ_16K, MX53_INT_GPIO5_LOW, MX53_INT_GPIO5_HIGH);
120 mxc_register_gpio("imx31-gpio", 5, MX53_GPIO6_BASE_ADDR, SZ_16K, MX53_INT_GPIO6_LOW, MX53_INT_GPIO6_HIGH);
121 mxc_register_gpio("imx31-gpio", 6, MX53_GPIO7_BASE_ADDR, SZ_16K, MX53_INT_GPIO7_LOW, MX53_INT_GPIO7_HIGH);
107} 122}
diff --git a/arch/arm/mach-mxs/Makefile b/arch/arm/mach-mxs/Makefile
index 58e892376bf2..6c38262a3aaa 100644
--- a/arch/arm/mach-mxs/Makefile
+++ b/arch/arm/mach-mxs/Makefile
@@ -1,5 +1,5 @@
1# Common support 1# Common support
2obj-y := clock.o devices.o gpio.o icoll.o iomux.o system.o timer.o 2obj-y := clock.o devices.o icoll.o iomux.o system.o timer.o
3 3
4obj-$(CONFIG_MXS_OCOTP) += ocotp.o 4obj-$(CONFIG_MXS_OCOTP) += ocotp.o
5obj-$(CONFIG_PM) += pm.o 5obj-$(CONFIG_PM) += pm.o
diff --git a/arch/arm/mach-mxs/devices.c b/arch/arm/mach-mxs/devices.c
index cfdb6b284702..fe3e847930c9 100644
--- a/arch/arm/mach-mxs/devices.c
+++ b/arch/arm/mach-mxs/devices.c
@@ -88,3 +88,14 @@ int __init mxs_add_amba_device(const struct amba_device *dev)
88 88
89 return amba_device_register(adev, &iomem_resource); 89 return amba_device_register(adev, &iomem_resource);
90} 90}
91
92struct device mxs_apbh_bus = {
93 .init_name = "mxs_apbh",
94 .parent = &platform_bus,
95};
96
97static int __init mxs_device_init(void)
98{
99 return device_register(&mxs_apbh_bus);
100}
101core_initcall(mxs_device_init);
diff --git a/arch/arm/mach-mxs/devices/Makefile b/arch/arm/mach-mxs/devices/Makefile
index 324f2824d38d..351915c683ff 100644
--- a/arch/arm/mach-mxs/devices/Makefile
+++ b/arch/arm/mach-mxs/devices/Makefile
@@ -6,4 +6,5 @@ obj-$(CONFIG_MXS_HAVE_PLATFORM_FLEXCAN) += platform-flexcan.o
6obj-$(CONFIG_MXS_HAVE_PLATFORM_MXS_I2C) += platform-mxs-i2c.o 6obj-$(CONFIG_MXS_HAVE_PLATFORM_MXS_I2C) += platform-mxs-i2c.o
7obj-$(CONFIG_MXS_HAVE_PLATFORM_MXS_MMC) += platform-mxs-mmc.o 7obj-$(CONFIG_MXS_HAVE_PLATFORM_MXS_MMC) += platform-mxs-mmc.o
8obj-$(CONFIG_MXS_HAVE_PLATFORM_MXS_PWM) += platform-mxs-pwm.o 8obj-$(CONFIG_MXS_HAVE_PLATFORM_MXS_PWM) += platform-mxs-pwm.o
9obj-y += platform-gpio-mxs.o
9obj-$(CONFIG_MXS_HAVE_PLATFORM_MXSFB) += platform-mxsfb.o 10obj-$(CONFIG_MXS_HAVE_PLATFORM_MXSFB) += platform-mxsfb.o
diff --git a/arch/arm/mach-mxs/devices/platform-auart.c b/arch/arm/mach-mxs/devices/platform-auart.c
index 796606cce0ce..27608f5d2ac8 100644
--- a/arch/arm/mach-mxs/devices/platform-auart.c
+++ b/arch/arm/mach-mxs/devices/platform-auart.c
@@ -6,6 +6,7 @@
6 * the terms of the GNU General Public License version 2 as published by the 6 * the terms of the GNU General Public License version 2 as published by the
7 * Free Software Foundation. 7 * Free Software Foundation.
8 */ 8 */
9#include <linux/dma-mapping.h>
9#include <asm/sizes.h> 10#include <asm/sizes.h>
10#include <mach/mx23.h> 11#include <mach/mx23.h>
11#include <mach/mx28.h> 12#include <mach/mx28.h>
diff --git a/arch/arm/mach-mxs/devices/platform-dma.c b/arch/arm/mach-mxs/devices/platform-dma.c
index 295c4424d5d9..6a0202b1016c 100644
--- a/arch/arm/mach-mxs/devices/platform-dma.c
+++ b/arch/arm/mach-mxs/devices/platform-dma.c
@@ -6,6 +6,7 @@
6 * Free Software Foundation. 6 * Free Software Foundation.
7 */ 7 */
8#include <linux/compiler.h> 8#include <linux/compiler.h>
9#include <linux/dma-mapping.h>
9#include <linux/err.h> 10#include <linux/err.h>
10#include <linux/init.h> 11#include <linux/init.h>
11 12
diff --git a/arch/arm/mach-mxs/devices/platform-fec.c b/arch/arm/mach-mxs/devices/platform-fec.c
index 9859cf283335..ae96a4fd8f14 100644
--- a/arch/arm/mach-mxs/devices/platform-fec.c
+++ b/arch/arm/mach-mxs/devices/platform-fec.c
@@ -6,6 +6,7 @@
6 * the terms of the GNU General Public License version 2 as published by the 6 * the terms of the GNU General Public License version 2 as published by the
7 * Free Software Foundation. 7 * Free Software Foundation.
8 */ 8 */
9#include <linux/dma-mapping.h>
9#include <asm/sizes.h> 10#include <asm/sizes.h>
10#include <mach/mx28.h> 11#include <mach/mx28.h>
11#include <mach/devices-common.h> 12#include <mach/devices-common.h>
diff --git a/arch/arm/mach-mxs/devices/platform-gpio-mxs.c b/arch/arm/mach-mxs/devices/platform-gpio-mxs.c
new file mode 100644
index 000000000000..ed0885e414e0
--- /dev/null
+++ b/arch/arm/mach-mxs/devices/platform-gpio-mxs.c
@@ -0,0 +1,53 @@
1/*
2 * Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it under
5 * the terms of the GNU General Public License version 2 as published by the
6 * Free Software Foundation.
7 */
8#include <linux/compiler.h>
9#include <linux/err.h>
10#include <linux/init.h>
11
12#include <mach/mx23.h>
13#include <mach/mx28.h>
14#include <mach/devices-common.h>
15
16struct platform_device *__init mxs_add_gpio(
17 int id, resource_size_t iobase, int irq)
18{
19 struct resource res[] = {
20 {
21 .start = iobase,
22 .end = iobase + SZ_8K - 1,
23 .flags = IORESOURCE_MEM,
24 }, {
25 .start = irq,
26 .end = irq,
27 .flags = IORESOURCE_IRQ,
28 },
29 };
30
31 return platform_device_register_resndata(&mxs_apbh_bus,
32 "gpio-mxs", id, res, ARRAY_SIZE(res), NULL, 0);
33}
34
35static int __init mxs_add_mxs_gpio(void)
36{
37 if (cpu_is_mx23()) {
38 mxs_add_gpio(0, MX23_PINCTRL_BASE_ADDR, MX23_INT_GPIO0);
39 mxs_add_gpio(1, MX23_PINCTRL_BASE_ADDR, MX23_INT_GPIO1);
40 mxs_add_gpio(2, MX23_PINCTRL_BASE_ADDR, MX23_INT_GPIO2);
41 }
42
43 if (cpu_is_mx28()) {
44 mxs_add_gpio(0, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO0);
45 mxs_add_gpio(1, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO1);
46 mxs_add_gpio(2, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO2);
47 mxs_add_gpio(3, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO3);
48 mxs_add_gpio(4, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO4);
49 }
50
51 return 0;
52}
53postcore_initcall(mxs_add_mxs_gpio);
diff --git a/arch/arm/mach-mxs/gpio.c b/arch/arm/mach-mxs/gpio.c
deleted file mode 100644
index 2c950fef71a8..000000000000
--- a/arch/arm/mach-mxs/gpio.c
+++ /dev/null
@@ -1,331 +0,0 @@
1/*
2 * MXC GPIO support. (c) 2008 Daniel Mack <daniel@caiaq.de>
3 * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
4 *
5 * Based on code from Freescale,
6 * Copyright (C) 2004-2010 Freescale Semiconductor, Inc. All Rights Reserved.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version 2
11 * of the License, or (at your option) any later version.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
20 * MA 02110-1301, USA.
21 */
22
23#include <linux/init.h>
24#include <linux/interrupt.h>
25#include <linux/io.h>
26#include <linux/irq.h>
27#include <linux/gpio.h>
28#include <mach/mx23.h>
29#include <mach/mx28.h>
30#include <asm-generic/bug.h>
31
32#include "gpio.h"
33
34static struct mxs_gpio_port *mxs_gpio_ports;
35static int gpio_table_size;
36
37#define PINCTRL_DOUT(n) ((cpu_is_mx23() ? 0x0500 : 0x0700) + (n) * 0x10)
38#define PINCTRL_DIN(n) ((cpu_is_mx23() ? 0x0600 : 0x0900) + (n) * 0x10)
39#define PINCTRL_DOE(n) ((cpu_is_mx23() ? 0x0700 : 0x0b00) + (n) * 0x10)
40#define PINCTRL_PIN2IRQ(n) ((cpu_is_mx23() ? 0x0800 : 0x1000) + (n) * 0x10)
41#define PINCTRL_IRQEN(n) ((cpu_is_mx23() ? 0x0900 : 0x1100) + (n) * 0x10)
42#define PINCTRL_IRQLEV(n) ((cpu_is_mx23() ? 0x0a00 : 0x1200) + (n) * 0x10)
43#define PINCTRL_IRQPOL(n) ((cpu_is_mx23() ? 0x0b00 : 0x1300) + (n) * 0x10)
44#define PINCTRL_IRQSTAT(n) ((cpu_is_mx23() ? 0x0c00 : 0x1400) + (n) * 0x10)
45
46#define GPIO_INT_FALL_EDGE 0x0
47#define GPIO_INT_LOW_LEV 0x1
48#define GPIO_INT_RISE_EDGE 0x2
49#define GPIO_INT_HIGH_LEV 0x3
50#define GPIO_INT_LEV_MASK (1 << 0)
51#define GPIO_INT_POL_MASK (1 << 1)
52
53/* Note: This driver assumes 32 GPIOs are handled in one register */
54
55static void clear_gpio_irqstatus(struct mxs_gpio_port *port, u32 index)
56{
57 __mxs_clrl(1 << index, port->base + PINCTRL_IRQSTAT(port->id));
58}
59
60static void set_gpio_irqenable(struct mxs_gpio_port *port, u32 index,
61 int enable)
62{
63 if (enable) {
64 __mxs_setl(1 << index, port->base + PINCTRL_IRQEN(port->id));
65 __mxs_setl(1 << index, port->base + PINCTRL_PIN2IRQ(port->id));
66 } else {
67 __mxs_clrl(1 << index, port->base + PINCTRL_IRQEN(port->id));
68 }
69}
70
71static void mxs_gpio_ack_irq(struct irq_data *d)
72{
73 u32 gpio = irq_to_gpio(d->irq);
74 clear_gpio_irqstatus(&mxs_gpio_ports[gpio / 32], gpio & 0x1f);
75}
76
77static void mxs_gpio_mask_irq(struct irq_data *d)
78{
79 u32 gpio = irq_to_gpio(d->irq);
80 set_gpio_irqenable(&mxs_gpio_ports[gpio / 32], gpio & 0x1f, 0);
81}
82
83static void mxs_gpio_unmask_irq(struct irq_data *d)
84{
85 u32 gpio = irq_to_gpio(d->irq);
86 set_gpio_irqenable(&mxs_gpio_ports[gpio / 32], gpio & 0x1f, 1);
87}
88
89static int mxs_gpio_get(struct gpio_chip *chip, unsigned offset);
90
91static int mxs_gpio_set_irq_type(struct irq_data *d, unsigned int type)
92{
93 u32 gpio = irq_to_gpio(d->irq);
94 u32 pin_mask = 1 << (gpio & 31);
95 struct mxs_gpio_port *port = &mxs_gpio_ports[gpio / 32];
96 void __iomem *pin_addr;
97 int edge;
98
99 switch (type) {
100 case IRQ_TYPE_EDGE_RISING:
101 edge = GPIO_INT_RISE_EDGE;
102 break;
103 case IRQ_TYPE_EDGE_FALLING:
104 edge = GPIO_INT_FALL_EDGE;
105 break;
106 case IRQ_TYPE_LEVEL_LOW:
107 edge = GPIO_INT_LOW_LEV;
108 break;
109 case IRQ_TYPE_LEVEL_HIGH:
110 edge = GPIO_INT_HIGH_LEV;
111 break;
112 default:
113 return -EINVAL;
114 }
115
116 /* set level or edge */
117 pin_addr = port->base + PINCTRL_IRQLEV(port->id);
118 if (edge & GPIO_INT_LEV_MASK)
119 __mxs_setl(pin_mask, pin_addr);
120 else
121 __mxs_clrl(pin_mask, pin_addr);
122
123 /* set polarity */
124 pin_addr = port->base + PINCTRL_IRQPOL(port->id);
125 if (edge & GPIO_INT_POL_MASK)
126 __mxs_setl(pin_mask, pin_addr);
127 else
128 __mxs_clrl(pin_mask, pin_addr);
129
130 clear_gpio_irqstatus(port, gpio & 0x1f);
131
132 return 0;
133}
134
135/* MXS has one interrupt *per* gpio port */
136static void mxs_gpio_irq_handler(u32 irq, struct irq_desc *desc)
137{
138 u32 irq_stat;
139 struct mxs_gpio_port *port = (struct mxs_gpio_port *)irq_get_handler_data(irq);
140 u32 gpio_irq_no_base = port->virtual_irq_start;
141
142 desc->irq_data.chip->irq_ack(&desc->irq_data);
143
144 irq_stat = __raw_readl(port->base + PINCTRL_IRQSTAT(port->id)) &
145 __raw_readl(port->base + PINCTRL_IRQEN(port->id));
146
147 while (irq_stat != 0) {
148 int irqoffset = fls(irq_stat) - 1;
149 generic_handle_irq(gpio_irq_no_base + irqoffset);
150 irq_stat &= ~(1 << irqoffset);
151 }
152}
153
154/*
155 * Set interrupt number "irq" in the GPIO as a wake-up source.
156 * While system is running, all registered GPIO interrupts need to have
157 * wake-up enabled. When system is suspended, only selected GPIO interrupts
158 * need to have wake-up enabled.
159 * @param irq interrupt source number
160 * @param enable enable as wake-up if equal to non-zero
161 * @return This function returns 0 on success.
162 */
163static int mxs_gpio_set_wake_irq(struct irq_data *d, unsigned int enable)
164{
165 u32 gpio = irq_to_gpio(d->irq);
166 u32 gpio_idx = gpio & 0x1f;
167 struct mxs_gpio_port *port = &mxs_gpio_ports[gpio / 32];
168
169 if (enable) {
170 if (port->irq_high && (gpio_idx >= 16))
171 enable_irq_wake(port->irq_high);
172 else
173 enable_irq_wake(port->irq);
174 } else {
175 if (port->irq_high && (gpio_idx >= 16))
176 disable_irq_wake(port->irq_high);
177 else
178 disable_irq_wake(port->irq);
179 }
180
181 return 0;
182}
183
184static struct irq_chip gpio_irq_chip = {
185 .name = "mxs gpio",
186 .irq_ack = mxs_gpio_ack_irq,
187 .irq_mask = mxs_gpio_mask_irq,
188 .irq_unmask = mxs_gpio_unmask_irq,
189 .irq_set_type = mxs_gpio_set_irq_type,
190 .irq_set_wake = mxs_gpio_set_wake_irq,
191};
192
193static void mxs_set_gpio_direction(struct gpio_chip *chip, unsigned offset,
194 int dir)
195{
196 struct mxs_gpio_port *port =
197 container_of(chip, struct mxs_gpio_port, chip);
198 void __iomem *pin_addr = port->base + PINCTRL_DOE(port->id);
199
200 if (dir)
201 __mxs_setl(1 << offset, pin_addr);
202 else
203 __mxs_clrl(1 << offset, pin_addr);
204}
205
206static int mxs_gpio_get(struct gpio_chip *chip, unsigned offset)
207{
208 struct mxs_gpio_port *port =
209 container_of(chip, struct mxs_gpio_port, chip);
210
211 return (__raw_readl(port->base + PINCTRL_DIN(port->id)) >> offset) & 1;
212}
213
214static void mxs_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
215{
216 struct mxs_gpio_port *port =
217 container_of(chip, struct mxs_gpio_port, chip);
218 void __iomem *pin_addr = port->base + PINCTRL_DOUT(port->id);
219
220 if (value)
221 __mxs_setl(1 << offset, pin_addr);
222 else
223 __mxs_clrl(1 << offset, pin_addr);
224}
225
226static int mxs_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
227{
228 struct mxs_gpio_port *port =
229 container_of(chip, struct mxs_gpio_port, chip);
230
231 return port->virtual_irq_start + offset;
232}
233
234static int mxs_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
235{
236 mxs_set_gpio_direction(chip, offset, 0);
237 return 0;
238}
239
240static int mxs_gpio_direction_output(struct gpio_chip *chip,
241 unsigned offset, int value)
242{
243 mxs_gpio_set(chip, offset, value);
244 mxs_set_gpio_direction(chip, offset, 1);
245 return 0;
246}
247
248int __init mxs_gpio_init(struct mxs_gpio_port *port, int cnt)
249{
250 int i, j;
251
252 /* save for local usage */
253 mxs_gpio_ports = port;
254 gpio_table_size = cnt;
255
256 pr_info("MXS GPIO hardware\n");
257
258 for (i = 0; i < cnt; i++) {
259 /* disable the interrupt and clear the status */
260 __raw_writel(0, port[i].base + PINCTRL_PIN2IRQ(i));
261 __raw_writel(0, port[i].base + PINCTRL_IRQEN(i));
262
263 /* clear address has to be used to clear IRQSTAT bits */
264 __mxs_clrl(~0U, port[i].base + PINCTRL_IRQSTAT(i));
265
266 for (j = port[i].virtual_irq_start;
267 j < port[i].virtual_irq_start + 32; j++) {
268 irq_set_chip_and_handler(j, &gpio_irq_chip,
269 handle_level_irq);
270 set_irq_flags(j, IRQF_VALID);
271 }
272
273 /* setup one handler for each entry */
274 irq_set_chained_handler(port[i].irq, mxs_gpio_irq_handler);
275 irq_set_handler_data(port[i].irq, &port[i]);
276
277 /* register gpio chip */
278 port[i].chip.direction_input = mxs_gpio_direction_input;
279 port[i].chip.direction_output = mxs_gpio_direction_output;
280 port[i].chip.get = mxs_gpio_get;
281 port[i].chip.set = mxs_gpio_set;
282 port[i].chip.to_irq = mxs_gpio_to_irq;
283 port[i].chip.base = i * 32;
284 port[i].chip.ngpio = 32;
285
286 /* its a serious configuration bug when it fails */
287 BUG_ON(gpiochip_add(&port[i].chip) < 0);
288 }
289
290 return 0;
291}
292
293#define MX23_GPIO_BASE MX23_IO_ADDRESS(MX23_PINCTRL_BASE_ADDR)
294#define MX28_GPIO_BASE MX28_IO_ADDRESS(MX28_PINCTRL_BASE_ADDR)
295
296#define DEFINE_MXS_GPIO_PORT(_base, _irq, _id) \
297 { \
298 .chip.label = "gpio-" #_id, \
299 .id = _id, \
300 .irq = _irq, \
301 .base = _base, \
302 .virtual_irq_start = MXS_GPIO_IRQ_START + (_id) * 32, \
303 }
304
305#ifdef CONFIG_SOC_IMX23
306static struct mxs_gpio_port mx23_gpio_ports[] = {
307 DEFINE_MXS_GPIO_PORT(MX23_GPIO_BASE, MX23_INT_GPIO0, 0),
308 DEFINE_MXS_GPIO_PORT(MX23_GPIO_BASE, MX23_INT_GPIO1, 1),
309 DEFINE_MXS_GPIO_PORT(MX23_GPIO_BASE, MX23_INT_GPIO2, 2),
310};
311
312int __init mx23_register_gpios(void)
313{
314 return mxs_gpio_init(mx23_gpio_ports, ARRAY_SIZE(mx23_gpio_ports));
315}
316#endif
317
318#ifdef CONFIG_SOC_IMX28
319static struct mxs_gpio_port mx28_gpio_ports[] = {
320 DEFINE_MXS_GPIO_PORT(MX28_GPIO_BASE, MX28_INT_GPIO0, 0),
321 DEFINE_MXS_GPIO_PORT(MX28_GPIO_BASE, MX28_INT_GPIO1, 1),
322 DEFINE_MXS_GPIO_PORT(MX28_GPIO_BASE, MX28_INT_GPIO2, 2),
323 DEFINE_MXS_GPIO_PORT(MX28_GPIO_BASE, MX28_INT_GPIO3, 3),
324 DEFINE_MXS_GPIO_PORT(MX28_GPIO_BASE, MX28_INT_GPIO4, 4),
325};
326
327int __init mx28_register_gpios(void)
328{
329 return mxs_gpio_init(mx28_gpio_ports, ARRAY_SIZE(mx28_gpio_ports));
330}
331#endif
diff --git a/arch/arm/mach-mxs/gpio.h b/arch/arm/mach-mxs/gpio.h
deleted file mode 100644
index 005bb06630b1..000000000000
--- a/arch/arm/mach-mxs/gpio.h
+++ /dev/null
@@ -1,34 +0,0 @@
1/*
2 * Copyright 2007 Freescale Semiconductor, Inc. All Rights Reserved.
3 * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
17 * MA 02110-1301, USA.
18 */
19
20#ifndef __MXS_GPIO_H__
21#define __MXS_GPIO_H__
22
23struct mxs_gpio_port {
24 void __iomem *base;
25 int id;
26 int irq;
27 int irq_high;
28 int virtual_irq_start;
29 struct gpio_chip chip;
30};
31
32int mxs_gpio_init(struct mxs_gpio_port*, int);
33
34#endif /* __MXS_GPIO_H__ */
diff --git a/arch/arm/mach-mxs/include/mach/devices-common.h b/arch/arm/mach-mxs/include/mach/devices-common.h
index 7a37469ed5bf..812d7a813a78 100644
--- a/arch/arm/mach-mxs/include/mach/devices-common.h
+++ b/arch/arm/mach-mxs/include/mach/devices-common.h
@@ -11,6 +11,8 @@
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/amba/bus.h> 12#include <linux/amba/bus.h>
13 13
14extern struct device mxs_apbh_bus;
15
14struct platform_device *mxs_add_platform_device_dmamask( 16struct platform_device *mxs_add_platform_device_dmamask(
15 const char *name, int id, 17 const char *name, int id,
16 const struct resource *res, unsigned int num_resources, 18 const struct resource *res, unsigned int num_resources,
diff --git a/arch/arm/mach-mxs/mach-mx28evk.c b/arch/arm/mach-mxs/mach-mx28evk.c
index eacdc6b0e70a..56767a5cce0e 100644
--- a/arch/arm/mach-mxs/mach-mx28evk.c
+++ b/arch/arm/mach-mxs/mach-mx28evk.c
@@ -26,7 +26,6 @@
26#include <mach/iomux-mx28.h> 26#include <mach/iomux-mx28.h>
27 27
28#include "devices-mx28.h" 28#include "devices-mx28.h"
29#include "gpio.h"
30 29
31#define MX28EVK_FLEXCAN_SWITCH MXS_GPIO_NR(2, 13) 30#define MX28EVK_FLEXCAN_SWITCH MXS_GPIO_NR(2, 13)
32#define MX28EVK_FEC_PHY_POWER MXS_GPIO_NR(2, 15) 31#define MX28EVK_FEC_PHY_POWER MXS_GPIO_NR(2, 15)
diff --git a/arch/arm/mach-mxs/mm-mx23.c b/arch/arm/mach-mxs/mm-mx23.c
index 5148cd64a6b7..1b2345ac1a87 100644
--- a/arch/arm/mach-mxs/mm-mx23.c
+++ b/arch/arm/mach-mxs/mm-mx23.c
@@ -41,5 +41,4 @@ void __init mx23_map_io(void)
41void __init mx23_init_irq(void) 41void __init mx23_init_irq(void)
42{ 42{
43 icoll_init_irq(); 43 icoll_init_irq();
44 mx23_register_gpios();
45} 44}
diff --git a/arch/arm/mach-mxs/mm-mx28.c b/arch/arm/mach-mxs/mm-mx28.c
index 7e4cea32ebc6..b6e18ddb92c0 100644
--- a/arch/arm/mach-mxs/mm-mx28.c
+++ b/arch/arm/mach-mxs/mm-mx28.c
@@ -41,5 +41,4 @@ void __init mx28_map_io(void)
41void __init mx28_init_irq(void) 41void __init mx28_init_irq(void)
42{ 42{
43 icoll_init_irq(); 43 icoll_init_irq();
44 mx28_register_gpios();
45} 44}
diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c
index de88c9297b68..f49ce85d2448 100644
--- a/arch/arm/mach-omap1/board-ams-delta.c
+++ b/arch/arm/mach-omap1/board-ams-delta.c
@@ -215,7 +215,7 @@ static struct omap_kp_platform_data ams_delta_kp_data __initdata = {
215 .delay = 9, 215 .delay = 9,
216}; 216};
217 217
218static struct platform_device ams_delta_kp_device __initdata = { 218static struct platform_device ams_delta_kp_device = {
219 .name = "omap-keypad", 219 .name = "omap-keypad",
220 .id = -1, 220 .id = -1,
221 .dev = { 221 .dev = {
@@ -225,12 +225,12 @@ static struct platform_device ams_delta_kp_device __initdata = {
225 .resource = ams_delta_kp_resources, 225 .resource = ams_delta_kp_resources,
226}; 226};
227 227
228static struct platform_device ams_delta_lcd_device __initdata = { 228static struct platform_device ams_delta_lcd_device = {
229 .name = "lcd_ams_delta", 229 .name = "lcd_ams_delta",
230 .id = -1, 230 .id = -1,
231}; 231};
232 232
233static struct platform_device ams_delta_led_device __initdata = { 233static struct platform_device ams_delta_led_device = {
234 .name = "ams-delta-led", 234 .name = "ams-delta-led",
235 .id = -1 235 .id = -1
236}; 236};
@@ -267,7 +267,7 @@ static struct soc_camera_link ams_delta_iclink = {
267 .power = ams_delta_camera_power, 267 .power = ams_delta_camera_power,
268}; 268};
269 269
270static struct platform_device ams_delta_camera_device __initdata = { 270static struct platform_device ams_delta_camera_device = {
271 .name = "soc-camera-pdrv", 271 .name = "soc-camera-pdrv",
272 .id = 0, 272 .id = 0,
273 .dev = { 273 .dev = {
diff --git a/arch/arm/mach-omap1/gpio15xx.c b/arch/arm/mach-omap1/gpio15xx.c
index 04c4b04cf54e..399da4ce017b 100644
--- a/arch/arm/mach-omap1/gpio15xx.c
+++ b/arch/arm/mach-omap1/gpio15xx.c
@@ -34,14 +34,25 @@ static struct __initdata resource omap15xx_mpu_gpio_resources[] = {
34 }, 34 },
35}; 35};
36 36
37static struct omap_gpio_reg_offs omap15xx_mpuio_regs = {
38 .revision = USHRT_MAX,
39 .direction = OMAP_MPUIO_IO_CNTL,
40 .datain = OMAP_MPUIO_INPUT_LATCH,
41 .dataout = OMAP_MPUIO_OUTPUT,
42 .irqstatus = OMAP_MPUIO_GPIO_INT,
43 .irqenable = OMAP_MPUIO_GPIO_MASKIT,
44 .irqenable_inv = true,
45};
46
37static struct __initdata omap_gpio_platform_data omap15xx_mpu_gpio_config = { 47static struct __initdata omap_gpio_platform_data omap15xx_mpu_gpio_config = {
38 .virtual_irq_start = IH_MPUIO_BASE, 48 .virtual_irq_start = IH_MPUIO_BASE,
39 .bank_type = METHOD_MPUIO, 49 .bank_type = METHOD_MPUIO,
40 .bank_width = 16, 50 .bank_width = 16,
41 .bank_stride = 1, 51 .bank_stride = 1,
52 .regs = &omap15xx_mpuio_regs,
42}; 53};
43 54
44static struct __initdata platform_device omap15xx_mpu_gpio = { 55static struct platform_device omap15xx_mpu_gpio = {
45 .name = "omap_gpio", 56 .name = "omap_gpio",
46 .id = 0, 57 .id = 0,
47 .dev = { 58 .dev = {
@@ -64,13 +75,24 @@ static struct __initdata resource omap15xx_gpio_resources[] = {
64 }, 75 },
65}; 76};
66 77
78static struct omap_gpio_reg_offs omap15xx_gpio_regs = {
79 .revision = USHRT_MAX,
80 .direction = OMAP1510_GPIO_DIR_CONTROL,
81 .datain = OMAP1510_GPIO_DATA_INPUT,
82 .dataout = OMAP1510_GPIO_DATA_OUTPUT,
83 .irqstatus = OMAP1510_GPIO_INT_STATUS,
84 .irqenable = OMAP1510_GPIO_INT_MASK,
85 .irqenable_inv = true,
86};
87
67static struct __initdata omap_gpio_platform_data omap15xx_gpio_config = { 88static struct __initdata omap_gpio_platform_data omap15xx_gpio_config = {
68 .virtual_irq_start = IH_GPIO_BASE, 89 .virtual_irq_start = IH_GPIO_BASE,
69 .bank_type = METHOD_GPIO_1510, 90 .bank_type = METHOD_GPIO_1510,
70 .bank_width = 16, 91 .bank_width = 16,
92 .regs = &omap15xx_gpio_regs,
71}; 93};
72 94
73static struct __initdata platform_device omap15xx_gpio = { 95static struct platform_device omap15xx_gpio = {
74 .name = "omap_gpio", 96 .name = "omap_gpio",
75 .id = 1, 97 .id = 1,
76 .dev = { 98 .dev = {
diff --git a/arch/arm/mach-omap1/gpio16xx.c b/arch/arm/mach-omap1/gpio16xx.c
index 5dd0d4c82b24..0f399bd0e70e 100644
--- a/arch/arm/mach-omap1/gpio16xx.c
+++ b/arch/arm/mach-omap1/gpio16xx.c
@@ -37,14 +37,25 @@ static struct __initdata resource omap16xx_mpu_gpio_resources[] = {
37 }, 37 },
38}; 38};
39 39
40static struct omap_gpio_reg_offs omap16xx_mpuio_regs = {
41 .revision = USHRT_MAX,
42 .direction = OMAP_MPUIO_IO_CNTL,
43 .datain = OMAP_MPUIO_INPUT_LATCH,
44 .dataout = OMAP_MPUIO_OUTPUT,
45 .irqstatus = OMAP_MPUIO_GPIO_INT,
46 .irqenable = OMAP_MPUIO_GPIO_MASKIT,
47 .irqenable_inv = true,
48};
49
40static struct __initdata omap_gpio_platform_data omap16xx_mpu_gpio_config = { 50static struct __initdata omap_gpio_platform_data omap16xx_mpu_gpio_config = {
41 .virtual_irq_start = IH_MPUIO_BASE, 51 .virtual_irq_start = IH_MPUIO_BASE,
42 .bank_type = METHOD_MPUIO, 52 .bank_type = METHOD_MPUIO,
43 .bank_width = 16, 53 .bank_width = 16,
44 .bank_stride = 1, 54 .bank_stride = 1,
55 .regs = &omap16xx_mpuio_regs,
45}; 56};
46 57
47static struct __initdata platform_device omap16xx_mpu_gpio = { 58static struct platform_device omap16xx_mpu_gpio = {
48 .name = "omap_gpio", 59 .name = "omap_gpio",
49 .id = 0, 60 .id = 0,
50 .dev = { 61 .dev = {
@@ -67,13 +78,27 @@ static struct __initdata resource omap16xx_gpio1_resources[] = {
67 }, 78 },
68}; 79};
69 80
81static struct omap_gpio_reg_offs omap16xx_gpio_regs = {
82 .revision = OMAP1610_GPIO_REVISION,
83 .direction = OMAP1610_GPIO_DIRECTION,
84 .set_dataout = OMAP1610_GPIO_SET_DATAOUT,
85 .clr_dataout = OMAP1610_GPIO_CLEAR_DATAOUT,
86 .datain = OMAP1610_GPIO_DATAIN,
87 .dataout = OMAP1610_GPIO_DATAOUT,
88 .irqstatus = OMAP1610_GPIO_IRQSTATUS1,
89 .irqenable = OMAP1610_GPIO_IRQENABLE1,
90 .set_irqenable = OMAP1610_GPIO_SET_IRQENABLE1,
91 .clr_irqenable = OMAP1610_GPIO_CLEAR_IRQENABLE1,
92};
93
70static struct __initdata omap_gpio_platform_data omap16xx_gpio1_config = { 94static struct __initdata omap_gpio_platform_data omap16xx_gpio1_config = {
71 .virtual_irq_start = IH_GPIO_BASE, 95 .virtual_irq_start = IH_GPIO_BASE,
72 .bank_type = METHOD_GPIO_1610, 96 .bank_type = METHOD_GPIO_1610,
73 .bank_width = 16, 97 .bank_width = 16,
98 .regs = &omap16xx_gpio_regs,
74}; 99};
75 100
76static struct __initdata platform_device omap16xx_gpio1 = { 101static struct platform_device omap16xx_gpio1 = {
77 .name = "omap_gpio", 102 .name = "omap_gpio",
78 .id = 1, 103 .id = 1,
79 .dev = { 104 .dev = {
@@ -100,9 +125,10 @@ static struct __initdata omap_gpio_platform_data omap16xx_gpio2_config = {
100 .virtual_irq_start = IH_GPIO_BASE + 16, 125 .virtual_irq_start = IH_GPIO_BASE + 16,
101 .bank_type = METHOD_GPIO_1610, 126 .bank_type = METHOD_GPIO_1610,
102 .bank_width = 16, 127 .bank_width = 16,
128 .regs = &omap16xx_gpio_regs,
103}; 129};
104 130
105static struct __initdata platform_device omap16xx_gpio2 = { 131static struct platform_device omap16xx_gpio2 = {
106 .name = "omap_gpio", 132 .name = "omap_gpio",
107 .id = 2, 133 .id = 2,
108 .dev = { 134 .dev = {
@@ -129,9 +155,10 @@ static struct __initdata omap_gpio_platform_data omap16xx_gpio3_config = {
129 .virtual_irq_start = IH_GPIO_BASE + 32, 155 .virtual_irq_start = IH_GPIO_BASE + 32,
130 .bank_type = METHOD_GPIO_1610, 156 .bank_type = METHOD_GPIO_1610,
131 .bank_width = 16, 157 .bank_width = 16,
158 .regs = &omap16xx_gpio_regs,
132}; 159};
133 160
134static struct __initdata platform_device omap16xx_gpio3 = { 161static struct platform_device omap16xx_gpio3 = {
135 .name = "omap_gpio", 162 .name = "omap_gpio",
136 .id = 3, 163 .id = 3,
137 .dev = { 164 .dev = {
@@ -158,9 +185,10 @@ static struct __initdata omap_gpio_platform_data omap16xx_gpio4_config = {
158 .virtual_irq_start = IH_GPIO_BASE + 48, 185 .virtual_irq_start = IH_GPIO_BASE + 48,
159 .bank_type = METHOD_GPIO_1610, 186 .bank_type = METHOD_GPIO_1610,
160 .bank_width = 16, 187 .bank_width = 16,
188 .regs = &omap16xx_gpio_regs,
161}; 189};
162 190
163static struct __initdata platform_device omap16xx_gpio4 = { 191static struct platform_device omap16xx_gpio4 = {
164 .name = "omap_gpio", 192 .name = "omap_gpio",
165 .id = 4, 193 .id = 4,
166 .dev = { 194 .dev = {
diff --git a/arch/arm/mach-omap1/gpio7xx.c b/arch/arm/mach-omap1/gpio7xx.c
index 1204c8b871af..5ab63eab0ff5 100644
--- a/arch/arm/mach-omap1/gpio7xx.c
+++ b/arch/arm/mach-omap1/gpio7xx.c
@@ -39,14 +39,25 @@ static struct __initdata resource omap7xx_mpu_gpio_resources[] = {
39 }, 39 },
40}; 40};
41 41
42static struct omap_gpio_reg_offs omap7xx_mpuio_regs = {
43 .revision = USHRT_MAX,
44 .direction = OMAP_MPUIO_IO_CNTL / 2,
45 .datain = OMAP_MPUIO_INPUT_LATCH / 2,
46 .dataout = OMAP_MPUIO_OUTPUT / 2,
47 .irqstatus = OMAP_MPUIO_GPIO_INT / 2,
48 .irqenable = OMAP_MPUIO_GPIO_MASKIT / 2,
49 .irqenable_inv = true,
50};
51
42static struct __initdata omap_gpio_platform_data omap7xx_mpu_gpio_config = { 52static struct __initdata omap_gpio_platform_data omap7xx_mpu_gpio_config = {
43 .virtual_irq_start = IH_MPUIO_BASE, 53 .virtual_irq_start = IH_MPUIO_BASE,
44 .bank_type = METHOD_MPUIO, 54 .bank_type = METHOD_MPUIO,
45 .bank_width = 32, 55 .bank_width = 32,
46 .bank_stride = 2, 56 .bank_stride = 2,
57 .regs = &omap7xx_mpuio_regs,
47}; 58};
48 59
49static struct __initdata platform_device omap7xx_mpu_gpio = { 60static struct platform_device omap7xx_mpu_gpio = {
50 .name = "omap_gpio", 61 .name = "omap_gpio",
51 .id = 0, 62 .id = 0,
52 .dev = { 63 .dev = {
@@ -69,13 +80,24 @@ static struct __initdata resource omap7xx_gpio1_resources[] = {
69 }, 80 },
70}; 81};
71 82
83static struct omap_gpio_reg_offs omap7xx_gpio_regs = {
84 .revision = USHRT_MAX,
85 .direction = OMAP7XX_GPIO_DIR_CONTROL,
86 .datain = OMAP7XX_GPIO_DATA_INPUT,
87 .dataout = OMAP7XX_GPIO_DATA_OUTPUT,
88 .irqstatus = OMAP7XX_GPIO_INT_STATUS,
89 .irqenable = OMAP7XX_GPIO_INT_MASK,
90 .irqenable_inv = true,
91};
92
72static struct __initdata omap_gpio_platform_data omap7xx_gpio1_config = { 93static struct __initdata omap_gpio_platform_data omap7xx_gpio1_config = {
73 .virtual_irq_start = IH_GPIO_BASE, 94 .virtual_irq_start = IH_GPIO_BASE,
74 .bank_type = METHOD_GPIO_7XX, 95 .bank_type = METHOD_GPIO_7XX,
75 .bank_width = 32, 96 .bank_width = 32,
97 .regs = &omap7xx_gpio_regs,
76}; 98};
77 99
78static struct __initdata platform_device omap7xx_gpio1 = { 100static struct platform_device omap7xx_gpio1 = {
79 .name = "omap_gpio", 101 .name = "omap_gpio",
80 .id = 1, 102 .id = 1,
81 .dev = { 103 .dev = {
@@ -102,9 +124,10 @@ static struct __initdata omap_gpio_platform_data omap7xx_gpio2_config = {
102 .virtual_irq_start = IH_GPIO_BASE + 32, 124 .virtual_irq_start = IH_GPIO_BASE + 32,
103 .bank_type = METHOD_GPIO_7XX, 125 .bank_type = METHOD_GPIO_7XX,
104 .bank_width = 32, 126 .bank_width = 32,
127 .regs = &omap7xx_gpio_regs,
105}; 128};
106 129
107static struct __initdata platform_device omap7xx_gpio2 = { 130static struct platform_device omap7xx_gpio2 = {
108 .name = "omap_gpio", 131 .name = "omap_gpio",
109 .id = 2, 132 .id = 2,
110 .dev = { 133 .dev = {
@@ -131,9 +154,10 @@ static struct __initdata omap_gpio_platform_data omap7xx_gpio3_config = {
131 .virtual_irq_start = IH_GPIO_BASE + 64, 154 .virtual_irq_start = IH_GPIO_BASE + 64,
132 .bank_type = METHOD_GPIO_7XX, 155 .bank_type = METHOD_GPIO_7XX,
133 .bank_width = 32, 156 .bank_width = 32,
157 .regs = &omap7xx_gpio_regs,
134}; 158};
135 159
136static struct __initdata platform_device omap7xx_gpio3 = { 160static struct platform_device omap7xx_gpio3 = {
137 .name = "omap_gpio", 161 .name = "omap_gpio",
138 .id = 3, 162 .id = 3,
139 .dev = { 163 .dev = {
@@ -160,9 +184,10 @@ static struct __initdata omap_gpio_platform_data omap7xx_gpio4_config = {
160 .virtual_irq_start = IH_GPIO_BASE + 96, 184 .virtual_irq_start = IH_GPIO_BASE + 96,
161 .bank_type = METHOD_GPIO_7XX, 185 .bank_type = METHOD_GPIO_7XX,
162 .bank_width = 32, 186 .bank_width = 32,
187 .regs = &omap7xx_gpio_regs,
163}; 188};
164 189
165static struct __initdata platform_device omap7xx_gpio4 = { 190static struct platform_device omap7xx_gpio4 = {
166 .name = "omap_gpio", 191 .name = "omap_gpio",
167 .id = 4, 192 .id = 4,
168 .dev = { 193 .dev = {
@@ -189,9 +214,10 @@ static struct __initdata omap_gpio_platform_data omap7xx_gpio5_config = {
189 .virtual_irq_start = IH_GPIO_BASE + 128, 214 .virtual_irq_start = IH_GPIO_BASE + 128,
190 .bank_type = METHOD_GPIO_7XX, 215 .bank_type = METHOD_GPIO_7XX,
191 .bank_width = 32, 216 .bank_width = 32,
217 .regs = &omap7xx_gpio_regs,
192}; 218};
193 219
194static struct __initdata platform_device omap7xx_gpio5 = { 220static struct platform_device omap7xx_gpio5 = {
195 .name = "omap_gpio", 221 .name = "omap_gpio",
196 .id = 5, 222 .id = 5,
197 .dev = { 223 .dev = {
@@ -218,9 +244,10 @@ static struct __initdata omap_gpio_platform_data omap7xx_gpio6_config = {
218 .virtual_irq_start = IH_GPIO_BASE + 160, 244 .virtual_irq_start = IH_GPIO_BASE + 160,
219 .bank_type = METHOD_GPIO_7XX, 245 .bank_type = METHOD_GPIO_7XX,
220 .bank_width = 32, 246 .bank_width = 32,
247 .regs = &omap7xx_gpio_regs,
221}; 248};
222 249
223static struct __initdata platform_device omap7xx_gpio6 = { 250static struct platform_device omap7xx_gpio6 = {
224 .name = "omap_gpio", 251 .name = "omap_gpio",
225 .id = 6, 252 .id = 6,
226 .dev = { 253 .dev = {
diff --git a/arch/arm/mach-omap1/pm_bus.c b/arch/arm/mach-omap1/pm_bus.c
index 334fb8871bc3..943072d5a1d5 100644
--- a/arch/arm/mach-omap1/pm_bus.c
+++ b/arch/arm/mach-omap1/pm_bus.c
@@ -32,7 +32,7 @@ static int omap1_pm_runtime_suspend(struct device *dev)
32 if (ret) 32 if (ret)
33 return ret; 33 return ret;
34 34
35 ret = pm_runtime_clk_suspend(dev); 35 ret = pm_clk_suspend(dev);
36 if (ret) { 36 if (ret) {
37 pm_generic_runtime_resume(dev); 37 pm_generic_runtime_resume(dev);
38 return ret; 38 return ret;
@@ -45,24 +45,24 @@ static int omap1_pm_runtime_resume(struct device *dev)
45{ 45{
46 dev_dbg(dev, "%s\n", __func__); 46 dev_dbg(dev, "%s\n", __func__);
47 47
48 pm_runtime_clk_resume(dev); 48 pm_clk_resume(dev);
49 return pm_generic_runtime_resume(dev); 49 return pm_generic_runtime_resume(dev);
50} 50}
51 51
52static struct dev_power_domain default_power_domain = { 52static struct dev_pm_domain default_pm_domain = {
53 .ops = { 53 .ops = {
54 .runtime_suspend = omap1_pm_runtime_suspend, 54 .runtime_suspend = omap1_pm_runtime_suspend,
55 .runtime_resume = omap1_pm_runtime_resume, 55 .runtime_resume = omap1_pm_runtime_resume,
56 USE_PLATFORM_PM_SLEEP_OPS 56 USE_PLATFORM_PM_SLEEP_OPS
57 }, 57 },
58}; 58};
59#define OMAP1_PWR_DOMAIN (&default_power_domain) 59#define OMAP1_PM_DOMAIN (&default_pm_domain)
60#else 60#else
61#define OMAP1_PWR_DOMAIN NULL 61#define OMAP1_PM_DOMAIN NULL
62#endif /* CONFIG_PM_RUNTIME */ 62#endif /* CONFIG_PM_RUNTIME */
63 63
64static struct pm_clk_notifier_block platform_bus_notifier = { 64static struct pm_clk_notifier_block platform_bus_notifier = {
65 .pwr_domain = OMAP1_PWR_DOMAIN, 65 .pm_domain = OMAP1_PM_DOMAIN,
66 .con_ids = { "ick", "fck", NULL, }, 66 .con_ids = { "ick", "fck", NULL, },
67}; 67};
68 68
@@ -71,7 +71,7 @@ static int __init omap1_pm_runtime_init(void)
71 if (!cpu_class_is_omap1()) 71 if (!cpu_class_is_omap1())
72 return -ENODEV; 72 return -ENODEV;
73 73
74 pm_runtime_clk_add_notifier(&platform_bus_type, &platform_bus_notifier); 74 pm_clk_add_notifier(&platform_bus_type, &platform_bus_notifier);
75 75
76 return 0; 76 return 0;
77} 77}
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index 990366726c58..88bd6f7705f0 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -558,7 +558,7 @@ static struct radio_si4713_platform_data rx51_si4713_data __initdata_or_module =
558 .subdev_board_info = &rx51_si4713_board_info, 558 .subdev_board_info = &rx51_si4713_board_info,
559}; 559};
560 560
561static struct platform_device rx51_si4713_dev __initdata_or_module = { 561static struct platform_device rx51_si4713_dev = {
562 .name = "radio-si4713", 562 .name = "radio-si4713",
563 .id = -1, 563 .id = -1,
564 .dev = { 564 .dev = {
diff --git a/arch/arm/mach-omap2/gpio.c b/arch/arm/mach-omap2/gpio.c
index 9529842ae054..2765cdc3152d 100644
--- a/arch/arm/mach-omap2/gpio.c
+++ b/arch/arm/mach-omap2/gpio.c
@@ -61,13 +61,45 @@ static int omap2_gpio_dev_init(struct omap_hwmod *oh, void *unused)
61 pdata->dbck_flag = dev_attr->dbck_flag; 61 pdata->dbck_flag = dev_attr->dbck_flag;
62 pdata->virtual_irq_start = IH_GPIO_BASE + 32 * (id - 1); 62 pdata->virtual_irq_start = IH_GPIO_BASE + 32 * (id - 1);
63 63
64 pdata->regs = kzalloc(sizeof(struct omap_gpio_reg_offs), GFP_KERNEL);
65 if (!pdata) {
66 pr_err("gpio%d: Memory allocation failed\n", id);
67 return -ENOMEM;
68 }
69
64 switch (oh->class->rev) { 70 switch (oh->class->rev) {
65 case 0: 71 case 0:
66 case 1: 72 case 1:
67 pdata->bank_type = METHOD_GPIO_24XX; 73 pdata->bank_type = METHOD_GPIO_24XX;
74 pdata->regs->revision = OMAP24XX_GPIO_REVISION;
75 pdata->regs->direction = OMAP24XX_GPIO_OE;
76 pdata->regs->datain = OMAP24XX_GPIO_DATAIN;
77 pdata->regs->dataout = OMAP24XX_GPIO_DATAOUT;
78 pdata->regs->set_dataout = OMAP24XX_GPIO_SETDATAOUT;
79 pdata->regs->clr_dataout = OMAP24XX_GPIO_CLEARDATAOUT;
80 pdata->regs->irqstatus = OMAP24XX_GPIO_IRQSTATUS1;
81 pdata->regs->irqstatus2 = OMAP24XX_GPIO_IRQSTATUS2;
82 pdata->regs->irqenable = OMAP24XX_GPIO_IRQENABLE1;
83 pdata->regs->set_irqenable = OMAP24XX_GPIO_SETIRQENABLE1;
84 pdata->regs->clr_irqenable = OMAP24XX_GPIO_CLEARIRQENABLE1;
85 pdata->regs->debounce = OMAP24XX_GPIO_DEBOUNCE_VAL;
86 pdata->regs->debounce_en = OMAP24XX_GPIO_DEBOUNCE_EN;
68 break; 87 break;
69 case 2: 88 case 2:
70 pdata->bank_type = METHOD_GPIO_44XX; 89 pdata->bank_type = METHOD_GPIO_44XX;
90 pdata->regs->revision = OMAP4_GPIO_REVISION;
91 pdata->regs->direction = OMAP4_GPIO_OE;
92 pdata->regs->datain = OMAP4_GPIO_DATAIN;
93 pdata->regs->dataout = OMAP4_GPIO_DATAOUT;
94 pdata->regs->set_dataout = OMAP4_GPIO_SETDATAOUT;
95 pdata->regs->clr_dataout = OMAP4_GPIO_CLEARDATAOUT;
96 pdata->regs->irqstatus = OMAP4_GPIO_IRQSTATUS0;
97 pdata->regs->irqstatus2 = OMAP4_GPIO_IRQSTATUS1;
98 pdata->regs->irqenable = OMAP4_GPIO_IRQSTATUSSET0;
99 pdata->regs->set_irqenable = OMAP4_GPIO_IRQSTATUSSET0;
100 pdata->regs->clr_irqenable = OMAP4_GPIO_IRQSTATUSCLR0;
101 pdata->regs->debounce = OMAP4_GPIO_DEBOUNCINGTIME;
102 pdata->regs->debounce_en = OMAP4_GPIO_DEBOUNCENABLE;
71 break; 103 break;
72 default: 104 default:
73 WARN(1, "Invalid gpio bank_type\n"); 105 WARN(1, "Invalid gpio bank_type\n");
@@ -87,6 +119,8 @@ static int omap2_gpio_dev_init(struct omap_hwmod *oh, void *unused)
87 return PTR_ERR(od); 119 return PTR_ERR(od);
88 } 120 }
89 121
122 omap_device_disable_idle_on_suspend(od);
123
90 gpio_bank_count++; 124 gpio_bank_count++;
91 return 0; 125 return 0;
92} 126}
diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c
index 1ac361b7b8cb..466fc722fa0f 100644
--- a/arch/arm/mach-omap2/serial.c
+++ b/arch/arm/mach-omap2/serial.c
@@ -805,6 +805,7 @@ void __init omap_serial_init_port(struct omap_board_data *bdata)
805 WARN(IS_ERR(od), "Could not build omap_device for %s: %s.\n", 805 WARN(IS_ERR(od), "Could not build omap_device for %s: %s.\n",
806 name, oh->name); 806 name, oh->name);
807 807
808 omap_device_disable_idle_on_suspend(od);
808 oh->mux = omap_hwmod_mux_init(bdata->pads, bdata->pads_cnt); 809 oh->mux = omap_hwmod_mux_init(bdata->pads, bdata->pads_cnt);
809 810
810 uart->irq = oh->mpu_irqs[0].irq; 811 uart->irq = oh->mpu_irqs[0].irq;
diff --git a/arch/arm/mach-pxa/mfp-pxa2xx.c b/arch/arm/mach-pxa/mfp-pxa2xx.c
index 87ae3129f4f7..b27544bcafcb 100644
--- a/arch/arm/mach-pxa/mfp-pxa2xx.c
+++ b/arch/arm/mach-pxa/mfp-pxa2xx.c
@@ -347,9 +347,9 @@ static int pxa2xx_mfp_suspend(void)
347 if ((gpio_desc[i].config & MFP_LPM_KEEP_OUTPUT) && 347 if ((gpio_desc[i].config & MFP_LPM_KEEP_OUTPUT) &&
348 (GPDR(i) & GPIO_bit(i))) { 348 (GPDR(i) & GPIO_bit(i))) {
349 if (GPLR(i) & GPIO_bit(i)) 349 if (GPLR(i) & GPIO_bit(i))
350 PGSR(i) |= GPIO_bit(i); 350 PGSR(gpio_to_bank(i)) |= GPIO_bit(i);
351 else 351 else
352 PGSR(i) &= ~GPIO_bit(i); 352 PGSR(gpio_to_bank(i)) &= ~GPIO_bit(i);
353 } 353 }
354 } 354 }
355 355
diff --git a/arch/arm/mach-pxa/raumfeld.c b/arch/arm/mach-pxa/raumfeld.c
index d130f77b6d11..2f37d43f51b6 100644
--- a/arch/arm/mach-pxa/raumfeld.c
+++ b/arch/arm/mach-pxa/raumfeld.c
@@ -573,10 +573,10 @@ static struct pxafb_mode_info sharp_lq043t3dx02_mode = {
573 .xres = 480, 573 .xres = 480,
574 .yres = 272, 574 .yres = 272,
575 .bpp = 16, 575 .bpp = 16,
576 .hsync_len = 4, 576 .hsync_len = 41,
577 .left_margin = 2, 577 .left_margin = 2,
578 .right_margin = 1, 578 .right_margin = 1,
579 .vsync_len = 1, 579 .vsync_len = 10,
580 .upper_margin = 3, 580 .upper_margin = 3,
581 .lower_margin = 1, 581 .lower_margin = 1,
582 .sync = 0, 582 .sync = 0,
@@ -596,29 +596,31 @@ static void __init raumfeld_lcd_init(void)
596{ 596{
597 int ret; 597 int ret;
598 598
599 pxa_set_fb_info(NULL, &raumfeld_sharp_lcd_info);
600
601 /* Earlier devices had the backlight regulator controlled
602 * via PWM, later versions use another controller for that */
603 if ((system_rev & 0xff) < 2) {
604 mfp_cfg_t raumfeld_pwm_pin_config = GPIO17_PWM0_OUT;
605 pxa3xx_mfp_config(&raumfeld_pwm_pin_config, 1);
606 platform_device_register(&raumfeld_pwm_backlight_device);
607 } else
608 platform_device_register(&raumfeld_lt3593_device);
609
610 ret = gpio_request(GPIO_TFT_VA_EN, "display VA enable"); 599 ret = gpio_request(GPIO_TFT_VA_EN, "display VA enable");
611 if (ret < 0) 600 if (ret < 0)
612 pr_warning("Unable to request GPIO_TFT_VA_EN\n"); 601 pr_warning("Unable to request GPIO_TFT_VA_EN\n");
613 else 602 else
614 gpio_direction_output(GPIO_TFT_VA_EN, 1); 603 gpio_direction_output(GPIO_TFT_VA_EN, 1);
615 604
605 msleep(100);
606
616 ret = gpio_request(GPIO_DISPLAY_ENABLE, "display enable"); 607 ret = gpio_request(GPIO_DISPLAY_ENABLE, "display enable");
617 if (ret < 0) 608 if (ret < 0)
618 pr_warning("Unable to request GPIO_DISPLAY_ENABLE\n"); 609 pr_warning("Unable to request GPIO_DISPLAY_ENABLE\n");
619 else 610 else
620 gpio_direction_output(GPIO_DISPLAY_ENABLE, 1); 611 gpio_direction_output(GPIO_DISPLAY_ENABLE, 1);
621 612
613 /* Hardware revision 2 has the backlight regulator controlled
614 * by an LT3593, earlier and later devices use PWM for that. */
615 if ((system_rev & 0xff) == 2) {
616 platform_device_register(&raumfeld_lt3593_device);
617 } else {
618 mfp_cfg_t raumfeld_pwm_pin_config = GPIO17_PWM0_OUT;
619 pxa3xx_mfp_config(&raumfeld_pwm_pin_config, 1);
620 platform_device_register(&raumfeld_pwm_backlight_device);
621 }
622
623 pxa_set_fb_info(NULL, &raumfeld_sharp_lcd_info);
622 platform_device_register(&pxa3xx_device_gcu); 624 platform_device_register(&pxa3xx_device_gcu);
623} 625}
624 626
@@ -657,10 +659,10 @@ static struct lis3lv02d_platform_data lis3_pdata = {
657 659
658#define SPI_AK4104 \ 660#define SPI_AK4104 \
659{ \ 661{ \
660 .modalias = "ak4104", \ 662 .modalias = "ak4104-codec", \
661 .max_speed_hz = 10000, \ 663 .max_speed_hz = 10000, \
662 .bus_num = 0, \ 664 .bus_num = 0, \
663 .chip_select = 0, \ 665 .chip_select = 0, \
664 .controller_data = (void *) GPIO_SPDIF_CS, \ 666 .controller_data = (void *) GPIO_SPDIF_CS, \
665} 667}
666 668
diff --git a/arch/arm/mach-s3c2410/include/mach/spi-gpio.h b/arch/arm/mach-s3c2410/include/mach/spi-gpio.h
deleted file mode 100644
index dcef2287cb38..000000000000
--- a/arch/arm/mach-s3c2410/include/mach/spi-gpio.h
+++ /dev/null
@@ -1,28 +0,0 @@
1/* arch/arm/mach-s3c2410/include/mach/spi-gpio.h
2 *
3 * Copyright (c) 2006 Simtec Electronics
4 * Ben Dooks <ben@simtec.co.uk>
5 *
6 * S3C2410 - SPI Controller platform_device info
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11*/
12
13#ifndef __ASM_ARCH_SPIGPIO_H
14#define __ASM_ARCH_SPIGPIO_H __FILE__
15
16struct s3c2410_spigpio_info {
17 unsigned long pin_clk;
18 unsigned long pin_mosi;
19 unsigned long pin_miso;
20
21 int num_chipselect;
22 int bus_num;
23
24 void (*chip_select)(struct s3c2410_spigpio_info *spi, int cs);
25};
26
27
28#endif /* __ASM_ARCH_SPIGPIO_H */
diff --git a/arch/arm/mach-s3c2410/mach-qt2410.c b/arch/arm/mach-s3c2410/mach-qt2410.c
index e8f49feef28c..f44f77531b1e 100644
--- a/arch/arm/mach-s3c2410/mach-qt2410.c
+++ b/arch/arm/mach-s3c2410/mach-qt2410.c
@@ -32,7 +32,7 @@
32#include <linux/platform_device.h> 32#include <linux/platform_device.h>
33#include <linux/serial_core.h> 33#include <linux/serial_core.h>
34#include <linux/spi/spi.h> 34#include <linux/spi/spi.h>
35#include <linux/spi/spi_bitbang.h> 35#include <linux/spi/spi_gpio.h>
36#include <linux/io.h> 36#include <linux/io.h>
37#include <linux/mtd/mtd.h> 37#include <linux/mtd/mtd.h>
38#include <linux/mtd/nand.h> 38#include <linux/mtd/nand.h>
@@ -53,8 +53,6 @@
53#include <mach/fb.h> 53#include <mach/fb.h>
54#include <plat/nand.h> 54#include <plat/nand.h>
55#include <plat/udc.h> 55#include <plat/udc.h>
56#include <mach/spi.h>
57#include <mach/spi-gpio.h>
58#include <plat/iic.h> 56#include <plat/iic.h>
59 57
60#include <plat/common-smdk.h> 58#include <plat/common-smdk.h>
@@ -216,32 +214,16 @@ static struct platform_device qt2410_led = {
216 214
217/* SPI */ 215/* SPI */
218 216
219static void spi_gpio_cs(struct s3c2410_spigpio_info *spi, int cs) 217static struct spi_gpio_platform_data spi_gpio_cfg = {
220{ 218 .sck = S3C2410_GPG(7),
221 switch (cs) { 219 .mosi = S3C2410_GPG(6),
222 case BITBANG_CS_ACTIVE: 220 .miso = S3C2410_GPG(5),
223 gpio_set_value(S3C2410_GPB(5), 0);
224 break;
225 case BITBANG_CS_INACTIVE:
226 gpio_set_value(S3C2410_GPB(5), 1);
227 break;
228 }
229}
230
231static struct s3c2410_spigpio_info spi_gpio_cfg = {
232 .pin_clk = S3C2410_GPG(7),
233 .pin_mosi = S3C2410_GPG(6),
234 .pin_miso = S3C2410_GPG(5),
235 .chip_select = &spi_gpio_cs,
236}; 221};
237 222
238
239static struct platform_device qt2410_spi = { 223static struct platform_device qt2410_spi = {
240 .name = "s3c24xx-spi-gpio", 224 .name = "spi-gpio",
241 .id = 1, 225 .id = 1,
242 .dev = { 226 .dev.platform_data = &spi_gpio_cfg,
243 .platform_data = &spi_gpio_cfg,
244 },
245}; 227};
246 228
247/* Board devices */ 229/* Board devices */
diff --git a/arch/arm/mach-s3c2412/mach-jive.c b/arch/arm/mach-s3c2412/mach-jive.c
index 85dcaeb9e62f..5eeb47580b0c 100644
--- a/arch/arm/mach-s3c2412/mach-jive.c
+++ b/arch/arm/mach-s3c2412/mach-jive.c
@@ -25,6 +25,7 @@
25#include <video/ili9320.h> 25#include <video/ili9320.h>
26 26
27#include <linux/spi/spi.h> 27#include <linux/spi/spi.h>
28#include <linux/spi/spi_gpio.h>
28 29
29#include <asm/mach/arch.h> 30#include <asm/mach/arch.h>
30#include <asm/mach/map.h> 31#include <asm/mach/map.h>
@@ -38,7 +39,6 @@
38#include <mach/regs-gpio.h> 39#include <mach/regs-gpio.h>
39#include <mach/regs-mem.h> 40#include <mach/regs-mem.h>
40#include <mach/regs-lcd.h> 41#include <mach/regs-lcd.h>
41#include <mach/spi-gpio.h>
42#include <mach/fb.h> 42#include <mach/fb.h>
43 43
44#include <asm/mach-types.h> 44#include <asm/mach-types.h>
@@ -389,45 +389,30 @@ static struct ili9320_platdata jive_lcm_config = {
389 389
390/* LCD SPI support */ 390/* LCD SPI support */
391 391
392static void jive_lcd_spi_chipselect(struct s3c2410_spigpio_info *spi, int cs) 392static struct spi_gpio_platform_data jive_lcd_spi = {
393{ 393 .sck = S3C2410_GPG(8),
394 gpio_set_value(S3C2410_GPB(7), cs ? 0 : 1); 394 .mosi = S3C2410_GPB(8),
395} 395 .miso = SPI_GPIO_NO_MISO,
396
397static struct s3c2410_spigpio_info jive_lcd_spi = {
398 .bus_num = 1,
399 .pin_clk = S3C2410_GPG(8),
400 .pin_mosi = S3C2410_GPB(8),
401 .num_chipselect = 1,
402 .chip_select = jive_lcd_spi_chipselect,
403}; 396};
404 397
405static struct platform_device jive_device_lcdspi = { 398static struct platform_device jive_device_lcdspi = {
406 .name = "spi_s3c24xx_gpio", 399 .name = "spi-gpio",
407 .id = 1, 400 .id = 1,
408 .num_resources = 0,
409 .dev.platform_data = &jive_lcd_spi, 401 .dev.platform_data = &jive_lcd_spi,
410}; 402};
411 403
412/* WM8750 audio code SPI definition */
413 404
414static void jive_wm8750_chipselect(struct s3c2410_spigpio_info *spi, int cs) 405/* WM8750 audio code SPI definition */
415{
416 gpio_set_value(S3C2410_GPH(10), cs ? 0 : 1);
417}
418 406
419static struct s3c2410_spigpio_info jive_wm8750_spi = { 407static struct spi_gpio_platform_data jive_wm8750_spi = {
420 .bus_num = 2, 408 .sck = S3C2410_GPB(4),
421 .pin_clk = S3C2410_GPB(4), 409 .mosi = S3C2410_GPB(9),
422 .pin_mosi = S3C2410_GPB(9), 410 .miso = SPI_GPIO_NO_MISO,
423 .num_chipselect = 1,
424 .chip_select = jive_wm8750_chipselect,
425}; 411};
426 412
427static struct platform_device jive_device_wm8750 = { 413static struct platform_device jive_device_wm8750 = {
428 .name = "spi_s3c24xx_gpio", 414 .name = "spi-gpio",
429 .id = 2, 415 .id = 2,
430 .num_resources = 0,
431 .dev.platform_data = &jive_wm8750_spi, 416 .dev.platform_data = &jive_wm8750_spi,
432}; 417};
433 418
@@ -441,12 +426,14 @@ static struct spi_board_info __initdata jive_spi_devs[] = {
441 .mode = SPI_MODE_3, /* CPOL=1, CPHA=1 */ 426 .mode = SPI_MODE_3, /* CPOL=1, CPHA=1 */
442 .max_speed_hz = 100000, 427 .max_speed_hz = 100000,
443 .platform_data = &jive_lcm_config, 428 .platform_data = &jive_lcm_config,
429 .controller_data = (void *)S3C2410_GPB(7),
444 }, { 430 }, {
445 .modalias = "WM8750", 431 .modalias = "WM8750",
446 .bus_num = 2, 432 .bus_num = 2,
447 .chip_select = 0, 433 .chip_select = 0,
448 .mode = SPI_MODE_0, /* CPOL=0, CPHA=0 */ 434 .mode = SPI_MODE_0, /* CPOL=0, CPHA=0 */
449 .max_speed_hz = 100000, 435 .max_speed_hz = 100000,
436 .controller_data = (void *)S3C2410_GPH(10),
450 }, 437 },
451}; 438};
452 439
diff --git a/arch/arm/mach-s3c2440/mach-gta02.c b/arch/arm/mach-s3c2440/mach-gta02.c
index 716662008ce2..c10ddf4ed7f1 100644
--- a/arch/arm/mach-s3c2440/mach-gta02.c
+++ b/arch/arm/mach-s3c2440/mach-gta02.c
@@ -74,7 +74,6 @@
74#include <mach/fb.h> 74#include <mach/fb.h>
75 75
76#include <mach/spi.h> 76#include <mach/spi.h>
77#include <mach/spi-gpio.h>
78#include <plat/usb-control.h> 77#include <plat/usb-control.h>
79#include <mach/regs-mem.h> 78#include <mach/regs-mem.h>
80#include <mach/hardware.h> 79#include <mach/hardware.h>
diff --git a/arch/arm/mach-s3c2440/mach-mini2440.c b/arch/arm/mach-s3c2440/mach-mini2440.c
index dd3120df09fe..fc2dc0b3d4fe 100644
--- a/arch/arm/mach-s3c2440/mach-mini2440.c
+++ b/arch/arm/mach-s3c2440/mach-mini2440.c
@@ -552,7 +552,7 @@ struct mini2440_features_t {
552 struct platform_device *optional[8]; 552 struct platform_device *optional[8];
553}; 553};
554 554
555static void mini2440_parse_features( 555static void __init mini2440_parse_features(
556 struct mini2440_features_t * features, 556 struct mini2440_features_t * features,
557 const char * features_str ) 557 const char * features_str )
558{ 558{
diff --git a/arch/arm/mach-s3c64xx/dev-spi.c b/arch/arm/mach-s3c64xx/dev-spi.c
index 82db072cb836..5e6b42089eb4 100644
--- a/arch/arm/mach-s3c64xx/dev-spi.c
+++ b/arch/arm/mach-s3c64xx/dev-spi.c
@@ -88,6 +88,7 @@ static struct s3c64xx_spi_info s3c64xx_spi0_pdata = {
88 .cfg_gpio = s3c64xx_spi_cfg_gpio, 88 .cfg_gpio = s3c64xx_spi_cfg_gpio,
89 .fifo_lvl_mask = 0x7f, 89 .fifo_lvl_mask = 0x7f,
90 .rx_lvl_offset = 13, 90 .rx_lvl_offset = 13,
91 .tx_st_done = 21,
91}; 92};
92 93
93static u64 spi_dmamask = DMA_BIT_MASK(32); 94static u64 spi_dmamask = DMA_BIT_MASK(32);
@@ -132,6 +133,7 @@ static struct s3c64xx_spi_info s3c64xx_spi1_pdata = {
132 .cfg_gpio = s3c64xx_spi_cfg_gpio, 133 .cfg_gpio = s3c64xx_spi_cfg_gpio,
133 .fifo_lvl_mask = 0x7f, 134 .fifo_lvl_mask = 0x7f,
134 .rx_lvl_offset = 13, 135 .rx_lvl_offset = 13,
136 .tx_st_done = 21,
135}; 137};
136 138
137struct platform_device s3c64xx_device_spi1 = { 139struct platform_device s3c64xx_device_spi1 = {
diff --git a/arch/arm/mach-s3c64xx/dma.c b/arch/arm/mach-s3c64xx/dma.c
index b197171e7d03..204bfafe4bfc 100644
--- a/arch/arm/mach-s3c64xx/dma.c
+++ b/arch/arm/mach-s3c64xx/dma.c
@@ -113,7 +113,7 @@ found:
113 return chan; 113 return chan;
114} 114}
115 115
116int s3c2410_dma_config(unsigned int channel, int xferunit) 116int s3c2410_dma_config(enum dma_ch channel, int xferunit)
117{ 117{
118 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); 118 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
119 119
@@ -297,7 +297,7 @@ static int s3c64xx_dma_flush(struct s3c2410_dma_chan *chan)
297 return 0; 297 return 0;
298} 298}
299 299
300int s3c2410_dma_ctrl(unsigned int channel, enum s3c2410_chan_op op) 300int s3c2410_dma_ctrl(enum dma_ch channel, enum s3c2410_chan_op op)
301{ 301{
302 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); 302 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
303 303
@@ -331,7 +331,7 @@ EXPORT_SYMBOL(s3c2410_dma_ctrl);
331 * 331 *
332 */ 332 */
333 333
334int s3c2410_dma_enqueue(unsigned int channel, void *id, 334int s3c2410_dma_enqueue(enum dma_ch channel, void *id,
335 dma_addr_t data, int size) 335 dma_addr_t data, int size)
336{ 336{
337 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); 337 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
@@ -415,7 +415,7 @@ err_buff:
415EXPORT_SYMBOL(s3c2410_dma_enqueue); 415EXPORT_SYMBOL(s3c2410_dma_enqueue);
416 416
417 417
418int s3c2410_dma_devconfig(unsigned int channel, 418int s3c2410_dma_devconfig(enum dma_ch channel,
419 enum s3c2410_dmasrc source, 419 enum s3c2410_dmasrc source,
420 unsigned long devaddr) 420 unsigned long devaddr)
421{ 421{
@@ -463,7 +463,7 @@ int s3c2410_dma_devconfig(unsigned int channel,
463EXPORT_SYMBOL(s3c2410_dma_devconfig); 463EXPORT_SYMBOL(s3c2410_dma_devconfig);
464 464
465 465
466int s3c2410_dma_getposition(unsigned int channel, 466int s3c2410_dma_getposition(enum dma_ch channel,
467 dma_addr_t *src, dma_addr_t *dst) 467 dma_addr_t *src, dma_addr_t *dst)
468{ 468{
469 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); 469 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
@@ -487,7 +487,7 @@ EXPORT_SYMBOL(s3c2410_dma_getposition);
487 * get control of an dma channel 487 * get control of an dma channel
488*/ 488*/
489 489
490int s3c2410_dma_request(unsigned int channel, 490int s3c2410_dma_request(enum dma_ch channel,
491 struct s3c2410_dma_client *client, 491 struct s3c2410_dma_client *client,
492 void *dev) 492 void *dev)
493{ 493{
@@ -533,7 +533,7 @@ EXPORT_SYMBOL(s3c2410_dma_request);
533 * allowed to go through. 533 * allowed to go through.
534*/ 534*/
535 535
536int s3c2410_dma_free(unsigned int channel, struct s3c2410_dma_client *client) 536int s3c2410_dma_free(enum dma_ch channel, struct s3c2410_dma_client *client)
537{ 537{
538 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); 538 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
539 unsigned long flags; 539 unsigned long flags;
diff --git a/arch/arm/mach-s5p64x0/dev-spi.c b/arch/arm/mach-s5p64x0/dev-spi.c
index e78ee18c76e3..ac825e826326 100644
--- a/arch/arm/mach-s5p64x0/dev-spi.c
+++ b/arch/arm/mach-s5p64x0/dev-spi.c
@@ -112,12 +112,14 @@ static struct s3c64xx_spi_info s5p6440_spi0_pdata = {
112 .cfg_gpio = s5p6440_spi_cfg_gpio, 112 .cfg_gpio = s5p6440_spi_cfg_gpio,
113 .fifo_lvl_mask = 0x1ff, 113 .fifo_lvl_mask = 0x1ff,
114 .rx_lvl_offset = 15, 114 .rx_lvl_offset = 15,
115 .tx_st_done = 25,
115}; 116};
116 117
117static struct s3c64xx_spi_info s5p6450_spi0_pdata = { 118static struct s3c64xx_spi_info s5p6450_spi0_pdata = {
118 .cfg_gpio = s5p6450_spi_cfg_gpio, 119 .cfg_gpio = s5p6450_spi_cfg_gpio,
119 .fifo_lvl_mask = 0x1ff, 120 .fifo_lvl_mask = 0x1ff,
120 .rx_lvl_offset = 15, 121 .rx_lvl_offset = 15,
122 .tx_st_done = 25,
121}; 123};
122 124
123static u64 spi_dmamask = DMA_BIT_MASK(32); 125static u64 spi_dmamask = DMA_BIT_MASK(32);
@@ -160,12 +162,14 @@ static struct s3c64xx_spi_info s5p6440_spi1_pdata = {
160 .cfg_gpio = s5p6440_spi_cfg_gpio, 162 .cfg_gpio = s5p6440_spi_cfg_gpio,
161 .fifo_lvl_mask = 0x7f, 163 .fifo_lvl_mask = 0x7f,
162 .rx_lvl_offset = 15, 164 .rx_lvl_offset = 15,
165 .tx_st_done = 25,
163}; 166};
164 167
165static struct s3c64xx_spi_info s5p6450_spi1_pdata = { 168static struct s3c64xx_spi_info s5p6450_spi1_pdata = {
166 .cfg_gpio = s5p6450_spi_cfg_gpio, 169 .cfg_gpio = s5p6450_spi_cfg_gpio,
167 .fifo_lvl_mask = 0x7f, 170 .fifo_lvl_mask = 0x7f,
168 .rx_lvl_offset = 15, 171 .rx_lvl_offset = 15,
172 .tx_st_done = 25,
169}; 173};
170 174
171struct platform_device s5p64x0_device_spi1 = { 175struct platform_device s5p64x0_device_spi1 = {
diff --git a/arch/arm/mach-s5pc100/dev-spi.c b/arch/arm/mach-s5pc100/dev-spi.c
index 57b19794d9bb..e5d6c4dceb56 100644
--- a/arch/arm/mach-s5pc100/dev-spi.c
+++ b/arch/arm/mach-s5pc100/dev-spi.c
@@ -15,6 +15,7 @@
15#include <mach/dma.h> 15#include <mach/dma.h>
16#include <mach/map.h> 16#include <mach/map.h>
17#include <mach/spi-clocks.h> 17#include <mach/spi-clocks.h>
18#include <mach/irqs.h>
18 19
19#include <plat/s3c64xx-spi.h> 20#include <plat/s3c64xx-spi.h>
20#include <plat/gpio-cfg.h> 21#include <plat/gpio-cfg.h>
@@ -90,6 +91,7 @@ static struct s3c64xx_spi_info s5pc100_spi0_pdata = {
90 .fifo_lvl_mask = 0x7f, 91 .fifo_lvl_mask = 0x7f,
91 .rx_lvl_offset = 13, 92 .rx_lvl_offset = 13,
92 .high_speed = 1, 93 .high_speed = 1,
94 .tx_st_done = 21,
93}; 95};
94 96
95static u64 spi_dmamask = DMA_BIT_MASK(32); 97static u64 spi_dmamask = DMA_BIT_MASK(32);
@@ -134,6 +136,7 @@ static struct s3c64xx_spi_info s5pc100_spi1_pdata = {
134 .fifo_lvl_mask = 0x7f, 136 .fifo_lvl_mask = 0x7f,
135 .rx_lvl_offset = 13, 137 .rx_lvl_offset = 13,
136 .high_speed = 1, 138 .high_speed = 1,
139 .tx_st_done = 21,
137}; 140};
138 141
139struct platform_device s5pc100_device_spi1 = { 142struct platform_device s5pc100_device_spi1 = {
@@ -176,6 +179,7 @@ static struct s3c64xx_spi_info s5pc100_spi2_pdata = {
176 .fifo_lvl_mask = 0x7f, 179 .fifo_lvl_mask = 0x7f,
177 .rx_lvl_offset = 13, 180 .rx_lvl_offset = 13,
178 .high_speed = 1, 181 .high_speed = 1,
182 .tx_st_done = 21,
179}; 183};
180 184
181struct platform_device s5pc100_device_spi2 = { 185struct platform_device s5pc100_device_spi2 = {
diff --git a/arch/arm/mach-s5pv210/dev-spi.c b/arch/arm/mach-s5pv210/dev-spi.c
index e3249a47e3b1..eaf9a7bff7a0 100644
--- a/arch/arm/mach-s5pv210/dev-spi.c
+++ b/arch/arm/mach-s5pv210/dev-spi.c
@@ -85,6 +85,7 @@ static struct s3c64xx_spi_info s5pv210_spi0_pdata = {
85 .fifo_lvl_mask = 0x1ff, 85 .fifo_lvl_mask = 0x1ff,
86 .rx_lvl_offset = 15, 86 .rx_lvl_offset = 15,
87 .high_speed = 1, 87 .high_speed = 1,
88 .tx_st_done = 25,
88}; 89};
89 90
90static u64 spi_dmamask = DMA_BIT_MASK(32); 91static u64 spi_dmamask = DMA_BIT_MASK(32);
@@ -129,6 +130,7 @@ static struct s3c64xx_spi_info s5pv210_spi1_pdata = {
129 .fifo_lvl_mask = 0x7f, 130 .fifo_lvl_mask = 0x7f,
130 .rx_lvl_offset = 15, 131 .rx_lvl_offset = 15,
131 .high_speed = 1, 132 .high_speed = 1,
133 .tx_st_done = 25,
132}; 134};
133 135
134struct platform_device s5pv210_device_spi1 = { 136struct platform_device s5pv210_device_spi1 = {
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c
index 803bc6edfca4..b473b8efac68 100644
--- a/arch/arm/mach-shmobile/board-ap4evb.c
+++ b/arch/arm/mach-shmobile/board-ap4evb.c
@@ -1408,9 +1408,14 @@ static void __init ap4evb_init(void)
1408 1408
1409 platform_add_devices(ap4evb_devices, ARRAY_SIZE(ap4evb_devices)); 1409 platform_add_devices(ap4evb_devices, ARRAY_SIZE(ap4evb_devices));
1410 1410
1411 sh7372_add_device_to_domain(&sh7372_a4lc, &lcdc1_device);
1412 sh7372_add_device_to_domain(&sh7372_a4lc, &lcdc_device);
1413 sh7372_add_device_to_domain(&sh7372_a4mp, &fsi_device);
1414
1411 hdmi_init_pm_clock(); 1415 hdmi_init_pm_clock();
1412 fsi_init_pm_clock(); 1416 fsi_init_pm_clock();
1413 sh7372_pm_init(); 1417 sh7372_pm_init();
1418 pm_clk_add(&fsi_device.dev, "spu2");
1414} 1419}
1415 1420
1416static void __init ap4evb_timer_init(void) 1421static void __init ap4evb_timer_init(void)
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index 3802f2afabef..5b36b6c5b448 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -1582,8 +1582,13 @@ static void __init mackerel_init(void)
1582 1582
1583 platform_add_devices(mackerel_devices, ARRAY_SIZE(mackerel_devices)); 1583 platform_add_devices(mackerel_devices, ARRAY_SIZE(mackerel_devices));
1584 1584
1585 sh7372_add_device_to_domain(&sh7372_a4lc, &lcdc_device);
1586 sh7372_add_device_to_domain(&sh7372_a4lc, &hdmi_lcdc_device);
1587 sh7372_add_device_to_domain(&sh7372_a4mp, &fsi_device);
1588
1585 hdmi_init_pm_clock(); 1589 hdmi_init_pm_clock();
1586 sh7372_pm_init(); 1590 sh7372_pm_init();
1591 pm_clk_add(&fsi_device.dev, "spu2");
1587} 1592}
1588 1593
1589static void __init mackerel_timer_init(void) 1594static void __init mackerel_timer_init(void)
diff --git a/arch/arm/mach-shmobile/clock-sh7372.c b/arch/arm/mach-shmobile/clock-sh7372.c
index c0800d83971e..91f5779abdd3 100644
--- a/arch/arm/mach-shmobile/clock-sh7372.c
+++ b/arch/arm/mach-shmobile/clock-sh7372.c
@@ -662,6 +662,7 @@ static struct clk_lookup lookups[] = {
662 CLKDEV_ICK_ID("ick", "sh-mobile-hdmi", &div6_reparent_clks[DIV6_HDMI]), 662 CLKDEV_ICK_ID("ick", "sh-mobile-hdmi", &div6_reparent_clks[DIV6_HDMI]),
663 CLKDEV_ICK_ID("icka", "sh_fsi2", &div6_reparent_clks[DIV6_FSIA]), 663 CLKDEV_ICK_ID("icka", "sh_fsi2", &div6_reparent_clks[DIV6_FSIA]),
664 CLKDEV_ICK_ID("ickb", "sh_fsi2", &div6_reparent_clks[DIV6_FSIB]), 664 CLKDEV_ICK_ID("ickb", "sh_fsi2", &div6_reparent_clks[DIV6_FSIB]),
665 CLKDEV_ICK_ID("spu2", "sh_fsi2", &mstp_clks[MSTP223]),
665}; 666};
666 667
667void __init sh7372_clock_init(void) 668void __init sh7372_clock_init(void)
diff --git a/arch/arm/mach-shmobile/include/mach/sh7372.h b/arch/arm/mach-shmobile/include/mach/sh7372.h
index df20d7670172..ce595cee86cd 100644
--- a/arch/arm/mach-shmobile/include/mach/sh7372.h
+++ b/arch/arm/mach-shmobile/include/mach/sh7372.h
@@ -12,6 +12,7 @@
12#define __ASM_SH7372_H__ 12#define __ASM_SH7372_H__
13 13
14#include <linux/sh_clk.h> 14#include <linux/sh_clk.h>
15#include <linux/pm_domain.h>
15 16
16/* 17/*
17 * Pin Function Controller: 18 * Pin Function Controller:
@@ -470,4 +471,32 @@ extern struct clk sh7372_fsibck_clk;
470extern struct clk sh7372_fsidiva_clk; 471extern struct clk sh7372_fsidiva_clk;
471extern struct clk sh7372_fsidivb_clk; 472extern struct clk sh7372_fsidivb_clk;
472 473
474struct platform_device;
475
476struct sh7372_pm_domain {
477 struct generic_pm_domain genpd;
478 unsigned int bit_shift;
479};
480
481static inline struct sh7372_pm_domain *to_sh7372_pd(struct generic_pm_domain *d)
482{
483 return container_of(d, struct sh7372_pm_domain, genpd);
484}
485
486#ifdef CONFIG_PM
487extern struct sh7372_pm_domain sh7372_a4lc;
488extern struct sh7372_pm_domain sh7372_a4mp;
489extern struct sh7372_pm_domain sh7372_d4;
490extern struct sh7372_pm_domain sh7372_a3rv;
491extern struct sh7372_pm_domain sh7372_a3ri;
492extern struct sh7372_pm_domain sh7372_a3sg;
493
494extern void sh7372_init_pm_domain(struct sh7372_pm_domain *sh7372_pd);
495extern void sh7372_add_device_to_domain(struct sh7372_pm_domain *sh7372_pd,
496 struct platform_device *pdev);
497#else
498#define sh7372_init_pm_domain(pd) do { } while(0)
499#define sh7372_add_device_to_domain(pd, pdev) do { } while(0)
500#endif /* CONFIG_PM */
501
473#endif /* __ASM_SH7372_H__ */ 502#endif /* __ASM_SH7372_H__ */
diff --git a/arch/arm/mach-shmobile/pm-sh7372.c b/arch/arm/mach-shmobile/pm-sh7372.c
index 8e4aadf14c9f..933fb411be0f 100644
--- a/arch/arm/mach-shmobile/pm-sh7372.c
+++ b/arch/arm/mach-shmobile/pm-sh7372.c
@@ -15,16 +15,176 @@
15#include <linux/list.h> 15#include <linux/list.h>
16#include <linux/err.h> 16#include <linux/err.h>
17#include <linux/slab.h> 17#include <linux/slab.h>
18#include <linux/pm_runtime.h>
19#include <linux/platform_device.h>
20#include <linux/delay.h>
18#include <asm/system.h> 21#include <asm/system.h>
19#include <asm/io.h> 22#include <asm/io.h>
20#include <asm/tlbflush.h> 23#include <asm/tlbflush.h>
21#include <mach/common.h> 24#include <mach/common.h>
25#include <mach/sh7372.h>
22 26
23#define SMFRAM 0xe6a70000 27#define SMFRAM 0xe6a70000
24#define SYSTBCR 0xe6150024 28#define SYSTBCR 0xe6150024
25#define SBAR 0xe6180020 29#define SBAR 0xe6180020
26#define APARMBAREA 0xe6f10020 30#define APARMBAREA 0xe6f10020
27 31
32#define SPDCR 0xe6180008
33#define SWUCR 0xe6180014
34#define PSTR 0xe6180080
35
36#define PSTR_RETRIES 100
37#define PSTR_DELAY_US 10
38
39#ifdef CONFIG_PM
40
41static int pd_power_down(struct generic_pm_domain *genpd)
42{
43 struct sh7372_pm_domain *sh7372_pd = to_sh7372_pd(genpd);
44 unsigned int mask = 1 << sh7372_pd->bit_shift;
45
46 if (__raw_readl(PSTR) & mask) {
47 unsigned int retry_count;
48
49 __raw_writel(mask, SPDCR);
50
51 for (retry_count = PSTR_RETRIES; retry_count; retry_count--) {
52 if (!(__raw_readl(SPDCR) & mask))
53 break;
54 cpu_relax();
55 }
56 }
57
58 pr_debug("sh7372 power domain down 0x%08x -> PSTR = 0x%08x\n",
59 mask, __raw_readl(PSTR));
60
61 return 0;
62}
63
64static int pd_power_up(struct generic_pm_domain *genpd)
65{
66 struct sh7372_pm_domain *sh7372_pd = to_sh7372_pd(genpd);
67 unsigned int mask = 1 << sh7372_pd->bit_shift;
68 unsigned int retry_count;
69 int ret = 0;
70
71 if (__raw_readl(PSTR) & mask)
72 goto out;
73
74 __raw_writel(mask, SWUCR);
75
76 for (retry_count = 2 * PSTR_RETRIES; retry_count; retry_count--) {
77 if (!(__raw_readl(SWUCR) & mask))
78 goto out;
79 if (retry_count > PSTR_RETRIES)
80 udelay(PSTR_DELAY_US);
81 else
82 cpu_relax();
83 }
84 if (__raw_readl(SWUCR) & mask)
85 ret = -EIO;
86
87 out:
88 pr_debug("sh7372 power domain up 0x%08x -> PSTR = 0x%08x\n",
89 mask, __raw_readl(PSTR));
90
91 return ret;
92}
93
94static int pd_power_up_a3rv(struct generic_pm_domain *genpd)
95{
96 int ret = pd_power_up(genpd);
97
98 /* force A4LC on after A3RV has been requested on */
99 pm_genpd_poweron(&sh7372_a4lc.genpd);
100
101 return ret;
102}
103
104static int pd_power_down_a3rv(struct generic_pm_domain *genpd)
105{
106 int ret = pd_power_down(genpd);
107
108 /* try to power down A4LC after A3RV is requested off */
109 genpd_queue_power_off_work(&sh7372_a4lc.genpd);
110
111 return ret;
112}
113
114static int pd_power_down_a4lc(struct generic_pm_domain *genpd)
115{
116 /* only power down A4LC if A3RV is off */
117 if (!(__raw_readl(PSTR) & (1 << sh7372_a3rv.bit_shift)))
118 return pd_power_down(genpd);
119
120 return -EBUSY;
121}
122
123static bool pd_active_wakeup(struct device *dev)
124{
125 return true;
126}
127
128void sh7372_init_pm_domain(struct sh7372_pm_domain *sh7372_pd)
129{
130 struct generic_pm_domain *genpd = &sh7372_pd->genpd;
131
132 pm_genpd_init(genpd, NULL, false);
133 genpd->stop_device = pm_clk_suspend;
134 genpd->start_device = pm_clk_resume;
135 genpd->active_wakeup = pd_active_wakeup;
136
137 if (sh7372_pd == &sh7372_a4lc) {
138 genpd->power_off = pd_power_down_a4lc;
139 genpd->power_on = pd_power_up;
140 } else if (sh7372_pd == &sh7372_a3rv) {
141 genpd->power_off = pd_power_down_a3rv;
142 genpd->power_on = pd_power_up_a3rv;
143 } else {
144 genpd->power_off = pd_power_down;
145 genpd->power_on = pd_power_up;
146 }
147 genpd->power_on(&sh7372_pd->genpd);
148}
149
150void sh7372_add_device_to_domain(struct sh7372_pm_domain *sh7372_pd,
151 struct platform_device *pdev)
152{
153 struct device *dev = &pdev->dev;
154
155 if (!dev->power.subsys_data) {
156 pm_clk_init(dev);
157 pm_clk_add(dev, NULL);
158 }
159 pm_genpd_add_device(&sh7372_pd->genpd, dev);
160}
161
162struct sh7372_pm_domain sh7372_a4lc = {
163 .bit_shift = 1,
164};
165
166struct sh7372_pm_domain sh7372_a4mp = {
167 .bit_shift = 2,
168};
169
170struct sh7372_pm_domain sh7372_d4 = {
171 .bit_shift = 3,
172};
173
174struct sh7372_pm_domain sh7372_a3rv = {
175 .bit_shift = 6,
176};
177
178struct sh7372_pm_domain sh7372_a3ri = {
179 .bit_shift = 8,
180};
181
182struct sh7372_pm_domain sh7372_a3sg = {
183 .bit_shift = 13,
184};
185
186#endif /* CONFIG_PM */
187
28static void sh7372_enter_core_standby(void) 188static void sh7372_enter_core_standby(void)
29{ 189{
30 void __iomem *smfram = (void __iomem *)SMFRAM; 190 void __iomem *smfram = (void __iomem *)SMFRAM;
diff --git a/arch/arm/mach-shmobile/pm_runtime.c b/arch/arm/mach-shmobile/pm_runtime.c
index 2d1b67a59e4a..6ec454e1e063 100644
--- a/arch/arm/mach-shmobile/pm_runtime.c
+++ b/arch/arm/mach-shmobile/pm_runtime.c
@@ -14,6 +14,7 @@
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/io.h> 15#include <linux/io.h>
16#include <linux/pm_runtime.h> 16#include <linux/pm_runtime.h>
17#include <linux/pm_domain.h>
17#include <linux/platform_device.h> 18#include <linux/platform_device.h>
18#include <linux/clk.h> 19#include <linux/clk.h>
19#include <linux/sh_clk.h> 20#include <linux/sh_clk.h>
@@ -28,31 +29,38 @@ static int default_platform_runtime_idle(struct device *dev)
28 return pm_runtime_suspend(dev); 29 return pm_runtime_suspend(dev);
29} 30}
30 31
31static struct dev_power_domain default_power_domain = { 32static struct dev_pm_domain default_pm_domain = {
32 .ops = { 33 .ops = {
33 .runtime_suspend = pm_runtime_clk_suspend, 34 .runtime_suspend = pm_clk_suspend,
34 .runtime_resume = pm_runtime_clk_resume, 35 .runtime_resume = pm_clk_resume,
35 .runtime_idle = default_platform_runtime_idle, 36 .runtime_idle = default_platform_runtime_idle,
36 USE_PLATFORM_PM_SLEEP_OPS 37 USE_PLATFORM_PM_SLEEP_OPS
37 }, 38 },
38}; 39};
39 40
40#define DEFAULT_PWR_DOMAIN_PTR (&default_power_domain) 41#define DEFAULT_PM_DOMAIN_PTR (&default_pm_domain)
41 42
42#else 43#else
43 44
44#define DEFAULT_PWR_DOMAIN_PTR NULL 45#define DEFAULT_PM_DOMAIN_PTR NULL
45 46
46#endif /* CONFIG_PM_RUNTIME */ 47#endif /* CONFIG_PM_RUNTIME */
47 48
48static struct pm_clk_notifier_block platform_bus_notifier = { 49static struct pm_clk_notifier_block platform_bus_notifier = {
49 .pwr_domain = DEFAULT_PWR_DOMAIN_PTR, 50 .pm_domain = DEFAULT_PM_DOMAIN_PTR,
50 .con_ids = { NULL, }, 51 .con_ids = { NULL, },
51}; 52};
52 53
53static int __init sh_pm_runtime_init(void) 54static int __init sh_pm_runtime_init(void)
54{ 55{
55 pm_runtime_clk_add_notifier(&platform_bus_type, &platform_bus_notifier); 56 pm_clk_add_notifier(&platform_bus_type, &platform_bus_notifier);
56 return 0; 57 return 0;
57} 58}
58core_initcall(sh_pm_runtime_init); 59core_initcall(sh_pm_runtime_init);
60
61static int __init sh_pm_runtime_late_init(void)
62{
63 pm_genpd_poweroff_unused();
64 return 0;
65}
66late_initcall(sh_pm_runtime_late_init);
diff --git a/arch/arm/mach-shmobile/setup-sh7372.c b/arch/arm/mach-shmobile/setup-sh7372.c
index cd807eea69e2..79f0413d8725 100644
--- a/arch/arm/mach-shmobile/setup-sh7372.c
+++ b/arch/arm/mach-shmobile/setup-sh7372.c
@@ -841,11 +841,22 @@ static struct platform_device *sh7372_late_devices[] __initdata = {
841 841
842void __init sh7372_add_standard_devices(void) 842void __init sh7372_add_standard_devices(void)
843{ 843{
844 sh7372_init_pm_domain(&sh7372_a4lc);
845 sh7372_init_pm_domain(&sh7372_a4mp);
846 sh7372_init_pm_domain(&sh7372_d4);
847 sh7372_init_pm_domain(&sh7372_a3rv);
848 sh7372_init_pm_domain(&sh7372_a3ri);
849 sh7372_init_pm_domain(&sh7372_a3sg);
850
844 platform_add_devices(sh7372_early_devices, 851 platform_add_devices(sh7372_early_devices,
845 ARRAY_SIZE(sh7372_early_devices)); 852 ARRAY_SIZE(sh7372_early_devices));
846 853
847 platform_add_devices(sh7372_late_devices, 854 platform_add_devices(sh7372_late_devices,
848 ARRAY_SIZE(sh7372_late_devices)); 855 ARRAY_SIZE(sh7372_late_devices));
856
857 sh7372_add_device_to_domain(&sh7372_a3rv, &vpu_device);
858 sh7372_add_device_to_domain(&sh7372_a4mp, &spu0_device);
859 sh7372_add_device_to_domain(&sh7372_a4mp, &spu1_device);
849} 860}
850 861
851void __init sh7372_add_early_devices(void) 862void __init sh7372_add_early_devices(void)
diff --git a/arch/arm/mach-tegra/Makefile b/arch/arm/mach-tegra/Makefile
index 823c703e573c..ed58ef9019b5 100644
--- a/arch/arm/mach-tegra/Makefile
+++ b/arch/arm/mach-tegra/Makefile
@@ -4,7 +4,6 @@ obj-y += io.o
4obj-y += irq.o 4obj-y += irq.o
5obj-y += clock.o 5obj-y += clock.o
6obj-y += timer.o 6obj-y += timer.o
7obj-y += gpio.o
8obj-y += pinmux.o 7obj-y += pinmux.o
9obj-y += powergate.o 8obj-y += powergate.o
10obj-y += fuse.o 9obj-y += fuse.o
diff --git a/arch/arm/mach-tegra/clock.c b/arch/arm/mach-tegra/clock.c
index e028320ab423..f8d41ffc0ca9 100644
--- a/arch/arm/mach-tegra/clock.c
+++ b/arch/arm/mach-tegra/clock.c
@@ -585,7 +585,7 @@ static const struct file_operations possible_parents_fops = {
585 585
586static int clk_debugfs_register_one(struct clk *c) 586static int clk_debugfs_register_one(struct clk *c)
587{ 587{
588 struct dentry *d, *child, *child_tmp; 588 struct dentry *d;
589 589
590 d = debugfs_create_dir(c->name, clk_debugfs_root); 590 d = debugfs_create_dir(c->name, clk_debugfs_root);
591 if (!d) 591 if (!d)
@@ -614,10 +614,7 @@ static int clk_debugfs_register_one(struct clk *c)
614 return 0; 614 return 0;
615 615
616err_out: 616err_out:
617 d = c->dent; 617 debugfs_remove_recursive(c->dent);
618 list_for_each_entry_safe(child, child_tmp, &d->d_subdirs, d_u.d_child)
619 debugfs_remove(child);
620 debugfs_remove(c->dent);
621 return -ENOMEM; 618 return -ENOMEM;
622} 619}
623 620
diff --git a/arch/arm/mach-tegra/gpio.c b/arch/arm/mach-tegra/gpio.c
deleted file mode 100644
index 919d63837736..000000000000
--- a/arch/arm/mach-tegra/gpio.c
+++ /dev/null
@@ -1,431 +0,0 @@
1/*
2 * arch/arm/mach-tegra/gpio.c
3 *
4 * Copyright (c) 2010 Google, Inc
5 *
6 * Author:
7 * Erik Gilling <konkers@google.com>
8 *
9 * This software is licensed under the terms of the GNU General Public
10 * License version 2, as published by the Free Software Foundation, and
11 * may be copied, distributed, and modified under those terms.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 */
19
20#include <linux/init.h>
21#include <linux/irq.h>
22#include <linux/interrupt.h>
23
24#include <linux/io.h>
25#include <linux/gpio.h>
26
27#include <asm/mach/irq.h>
28
29#include <mach/iomap.h>
30#include <mach/suspend.h>
31
32#define GPIO_BANK(x) ((x) >> 5)
33#define GPIO_PORT(x) (((x) >> 3) & 0x3)
34#define GPIO_BIT(x) ((x) & 0x7)
35
36#define GPIO_REG(x) (IO_TO_VIRT(TEGRA_GPIO_BASE) + \
37 GPIO_BANK(x) * 0x80 + \
38 GPIO_PORT(x) * 4)
39
40#define GPIO_CNF(x) (GPIO_REG(x) + 0x00)
41#define GPIO_OE(x) (GPIO_REG(x) + 0x10)
42#define GPIO_OUT(x) (GPIO_REG(x) + 0X20)
43#define GPIO_IN(x) (GPIO_REG(x) + 0x30)
44#define GPIO_INT_STA(x) (GPIO_REG(x) + 0x40)
45#define GPIO_INT_ENB(x) (GPIO_REG(x) + 0x50)
46#define GPIO_INT_LVL(x) (GPIO_REG(x) + 0x60)
47#define GPIO_INT_CLR(x) (GPIO_REG(x) + 0x70)
48
49#define GPIO_MSK_CNF(x) (GPIO_REG(x) + 0x800)
50#define GPIO_MSK_OE(x) (GPIO_REG(x) + 0x810)
51#define GPIO_MSK_OUT(x) (GPIO_REG(x) + 0X820)
52#define GPIO_MSK_INT_STA(x) (GPIO_REG(x) + 0x840)
53#define GPIO_MSK_INT_ENB(x) (GPIO_REG(x) + 0x850)
54#define GPIO_MSK_INT_LVL(x) (GPIO_REG(x) + 0x860)
55
56#define GPIO_INT_LVL_MASK 0x010101
57#define GPIO_INT_LVL_EDGE_RISING 0x000101
58#define GPIO_INT_LVL_EDGE_FALLING 0x000100
59#define GPIO_INT_LVL_EDGE_BOTH 0x010100
60#define GPIO_INT_LVL_LEVEL_HIGH 0x000001
61#define GPIO_INT_LVL_LEVEL_LOW 0x000000
62
63struct tegra_gpio_bank {
64 int bank;
65 int irq;
66 spinlock_t lvl_lock[4];
67#ifdef CONFIG_PM
68 u32 cnf[4];
69 u32 out[4];
70 u32 oe[4];
71 u32 int_enb[4];
72 u32 int_lvl[4];
73#endif
74};
75
76
77static struct tegra_gpio_bank tegra_gpio_banks[] = {
78 {.bank = 0, .irq = INT_GPIO1},
79 {.bank = 1, .irq = INT_GPIO2},
80 {.bank = 2, .irq = INT_GPIO3},
81 {.bank = 3, .irq = INT_GPIO4},
82 {.bank = 4, .irq = INT_GPIO5},
83 {.bank = 5, .irq = INT_GPIO6},
84 {.bank = 6, .irq = INT_GPIO7},
85};
86
87static int tegra_gpio_compose(int bank, int port, int bit)
88{
89 return (bank << 5) | ((port & 0x3) << 3) | (bit & 0x7);
90}
91
92static void tegra_gpio_mask_write(u32 reg, int gpio, int value)
93{
94 u32 val;
95
96 val = 0x100 << GPIO_BIT(gpio);
97 if (value)
98 val |= 1 << GPIO_BIT(gpio);
99 __raw_writel(val, reg);
100}
101
102void tegra_gpio_enable(int gpio)
103{
104 tegra_gpio_mask_write(GPIO_MSK_CNF(gpio), gpio, 1);
105}
106
107void tegra_gpio_disable(int gpio)
108{
109 tegra_gpio_mask_write(GPIO_MSK_CNF(gpio), gpio, 0);
110}
111
112static void tegra_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
113{
114 tegra_gpio_mask_write(GPIO_MSK_OUT(offset), offset, value);
115}
116
117static int tegra_gpio_get(struct gpio_chip *chip, unsigned offset)
118{
119 return (__raw_readl(GPIO_IN(offset)) >> GPIO_BIT(offset)) & 0x1;
120}
121
122static int tegra_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
123{
124 tegra_gpio_mask_write(GPIO_MSK_OE(offset), offset, 0);
125 return 0;
126}
127
128static int tegra_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
129 int value)
130{
131 tegra_gpio_set(chip, offset, value);
132 tegra_gpio_mask_write(GPIO_MSK_OE(offset), offset, 1);
133 return 0;
134}
135
136
137
138static struct gpio_chip tegra_gpio_chip = {
139 .label = "tegra-gpio",
140 .direction_input = tegra_gpio_direction_input,
141 .get = tegra_gpio_get,
142 .direction_output = tegra_gpio_direction_output,
143 .set = tegra_gpio_set,
144 .base = 0,
145 .ngpio = TEGRA_NR_GPIOS,
146};
147
148static void tegra_gpio_irq_ack(struct irq_data *d)
149{
150 int gpio = d->irq - INT_GPIO_BASE;
151
152 __raw_writel(1 << GPIO_BIT(gpio), GPIO_INT_CLR(gpio));
153}
154
155static void tegra_gpio_irq_mask(struct irq_data *d)
156{
157 int gpio = d->irq - INT_GPIO_BASE;
158
159 tegra_gpio_mask_write(GPIO_MSK_INT_ENB(gpio), gpio, 0);
160}
161
162static void tegra_gpio_irq_unmask(struct irq_data *d)
163{
164 int gpio = d->irq - INT_GPIO_BASE;
165
166 tegra_gpio_mask_write(GPIO_MSK_INT_ENB(gpio), gpio, 1);
167}
168
169static int tegra_gpio_irq_set_type(struct irq_data *d, unsigned int type)
170{
171 int gpio = d->irq - INT_GPIO_BASE;
172 struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d);
173 int port = GPIO_PORT(gpio);
174 int lvl_type;
175 int val;
176 unsigned long flags;
177
178 switch (type & IRQ_TYPE_SENSE_MASK) {
179 case IRQ_TYPE_EDGE_RISING:
180 lvl_type = GPIO_INT_LVL_EDGE_RISING;
181 break;
182
183 case IRQ_TYPE_EDGE_FALLING:
184 lvl_type = GPIO_INT_LVL_EDGE_FALLING;
185 break;
186
187 case IRQ_TYPE_EDGE_BOTH:
188 lvl_type = GPIO_INT_LVL_EDGE_BOTH;
189 break;
190
191 case IRQ_TYPE_LEVEL_HIGH:
192 lvl_type = GPIO_INT_LVL_LEVEL_HIGH;
193 break;
194
195 case IRQ_TYPE_LEVEL_LOW:
196 lvl_type = GPIO_INT_LVL_LEVEL_LOW;
197 break;
198
199 default:
200 return -EINVAL;
201 }
202
203 spin_lock_irqsave(&bank->lvl_lock[port], flags);
204
205 val = __raw_readl(GPIO_INT_LVL(gpio));
206 val &= ~(GPIO_INT_LVL_MASK << GPIO_BIT(gpio));
207 val |= lvl_type << GPIO_BIT(gpio);
208 __raw_writel(val, GPIO_INT_LVL(gpio));
209
210 spin_unlock_irqrestore(&bank->lvl_lock[port], flags);
211
212 if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
213 __irq_set_handler_locked(d->irq, handle_level_irq);
214 else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
215 __irq_set_handler_locked(d->irq, handle_edge_irq);
216
217 return 0;
218}
219
220static void tegra_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
221{
222 struct tegra_gpio_bank *bank;
223 int port;
224 int pin;
225 int unmasked = 0;
226 struct irq_chip *chip = irq_desc_get_chip(desc);
227
228 chained_irq_enter(chip, desc);
229
230 bank = irq_get_handler_data(irq);
231
232 for (port = 0; port < 4; port++) {
233 int gpio = tegra_gpio_compose(bank->bank, port, 0);
234 unsigned long sta = __raw_readl(GPIO_INT_STA(gpio)) &
235 __raw_readl(GPIO_INT_ENB(gpio));
236 u32 lvl = __raw_readl(GPIO_INT_LVL(gpio));
237
238 for_each_set_bit(pin, &sta, 8) {
239 __raw_writel(1 << pin, GPIO_INT_CLR(gpio));
240
241 /* if gpio is edge triggered, clear condition
242 * before executing the hander so that we don't
243 * miss edges
244 */
245 if (lvl & (0x100 << pin)) {
246 unmasked = 1;
247 chained_irq_exit(chip, desc);
248 }
249
250 generic_handle_irq(gpio_to_irq(gpio + pin));
251 }
252 }
253
254 if (!unmasked)
255 chained_irq_exit(chip, desc);
256
257}
258
259#ifdef CONFIG_PM
260void tegra_gpio_resume(void)
261{
262 unsigned long flags;
263 int b;
264 int p;
265
266 local_irq_save(flags);
267
268 for (b = 0; b < ARRAY_SIZE(tegra_gpio_banks); b++) {
269 struct tegra_gpio_bank *bank = &tegra_gpio_banks[b];
270
271 for (p = 0; p < ARRAY_SIZE(bank->oe); p++) {
272 unsigned int gpio = (b<<5) | (p<<3);
273 __raw_writel(bank->cnf[p], GPIO_CNF(gpio));
274 __raw_writel(bank->out[p], GPIO_OUT(gpio));
275 __raw_writel(bank->oe[p], GPIO_OE(gpio));
276 __raw_writel(bank->int_lvl[p], GPIO_INT_LVL(gpio));
277 __raw_writel(bank->int_enb[p], GPIO_INT_ENB(gpio));
278 }
279 }
280
281 local_irq_restore(flags);
282}
283
284void tegra_gpio_suspend(void)
285{
286 unsigned long flags;
287 int b;
288 int p;
289
290 local_irq_save(flags);
291 for (b = 0; b < ARRAY_SIZE(tegra_gpio_banks); b++) {
292 struct tegra_gpio_bank *bank = &tegra_gpio_banks[b];
293
294 for (p = 0; p < ARRAY_SIZE(bank->oe); p++) {
295 unsigned int gpio = (b<<5) | (p<<3);
296 bank->cnf[p] = __raw_readl(GPIO_CNF(gpio));
297 bank->out[p] = __raw_readl(GPIO_OUT(gpio));
298 bank->oe[p] = __raw_readl(GPIO_OE(gpio));
299 bank->int_enb[p] = __raw_readl(GPIO_INT_ENB(gpio));
300 bank->int_lvl[p] = __raw_readl(GPIO_INT_LVL(gpio));
301 }
302 }
303 local_irq_restore(flags);
304}
305
306static int tegra_gpio_wake_enable(struct irq_data *d, unsigned int enable)
307{
308 struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d);
309 return irq_set_irq_wake(bank->irq, enable);
310}
311#endif
312
313static struct irq_chip tegra_gpio_irq_chip = {
314 .name = "GPIO",
315 .irq_ack = tegra_gpio_irq_ack,
316 .irq_mask = tegra_gpio_irq_mask,
317 .irq_unmask = tegra_gpio_irq_unmask,
318 .irq_set_type = tegra_gpio_irq_set_type,
319#ifdef CONFIG_PM
320 .irq_set_wake = tegra_gpio_wake_enable,
321#endif
322};
323
324
325/* This lock class tells lockdep that GPIO irqs are in a different
326 * category than their parents, so it won't report false recursion.
327 */
328static struct lock_class_key gpio_lock_class;
329
330static int __init tegra_gpio_init(void)
331{
332 struct tegra_gpio_bank *bank;
333 int i;
334 int j;
335
336 for (i = 0; i < 7; i++) {
337 for (j = 0; j < 4; j++) {
338 int gpio = tegra_gpio_compose(i, j, 0);
339 __raw_writel(0x00, GPIO_INT_ENB(gpio));
340 }
341 }
342
343 gpiochip_add(&tegra_gpio_chip);
344
345 for (i = INT_GPIO_BASE; i < (INT_GPIO_BASE + TEGRA_NR_GPIOS); i++) {
346 bank = &tegra_gpio_banks[GPIO_BANK(irq_to_gpio(i))];
347
348 irq_set_lockdep_class(i, &gpio_lock_class);
349 irq_set_chip_data(i, bank);
350 irq_set_chip_and_handler(i, &tegra_gpio_irq_chip,
351 handle_simple_irq);
352 set_irq_flags(i, IRQF_VALID);
353 }
354
355 for (i = 0; i < ARRAY_SIZE(tegra_gpio_banks); i++) {
356 bank = &tegra_gpio_banks[i];
357
358 irq_set_chained_handler(bank->irq, tegra_gpio_irq_handler);
359 irq_set_handler_data(bank->irq, bank);
360
361 for (j = 0; j < 4; j++)
362 spin_lock_init(&bank->lvl_lock[j]);
363 }
364
365 return 0;
366}
367
368postcore_initcall(tegra_gpio_init);
369
370void __init tegra_gpio_config(struct tegra_gpio_table *table, int num)
371{
372 int i;
373
374 for (i = 0; i < num; i++) {
375 int gpio = table[i].gpio;
376
377 if (table[i].enable)
378 tegra_gpio_enable(gpio);
379 else
380 tegra_gpio_disable(gpio);
381 }
382}
383
384#ifdef CONFIG_DEBUG_FS
385
386#include <linux/debugfs.h>
387#include <linux/seq_file.h>
388
389static int dbg_gpio_show(struct seq_file *s, void *unused)
390{
391 int i;
392 int j;
393
394 for (i = 0; i < 7; i++) {
395 for (j = 0; j < 4; j++) {
396 int gpio = tegra_gpio_compose(i, j, 0);
397 seq_printf(s,
398 "%d:%d %02x %02x %02x %02x %02x %02x %06x\n",
399 i, j,
400 __raw_readl(GPIO_CNF(gpio)),
401 __raw_readl(GPIO_OE(gpio)),
402 __raw_readl(GPIO_OUT(gpio)),
403 __raw_readl(GPIO_IN(gpio)),
404 __raw_readl(GPIO_INT_STA(gpio)),
405 __raw_readl(GPIO_INT_ENB(gpio)),
406 __raw_readl(GPIO_INT_LVL(gpio)));
407 }
408 }
409 return 0;
410}
411
412static int dbg_gpio_open(struct inode *inode, struct file *file)
413{
414 return single_open(file, dbg_gpio_show, &inode->i_private);
415}
416
417static const struct file_operations debug_fops = {
418 .open = dbg_gpio_open,
419 .read = seq_read,
420 .llseek = seq_lseek,
421 .release = single_release,
422};
423
424static int __init tegra_gpio_debuginit(void)
425{
426 (void) debugfs_create_file("tegra_gpio", S_IRUGO,
427 NULL, NULL, &debug_fops);
428 return 0;
429}
430late_initcall(tegra_gpio_debuginit);
431#endif
diff --git a/arch/arm/mach-ux500/clock.c b/arch/arm/mach-ux500/clock.c
index 32ce90840ee1..7d107be63eb4 100644
--- a/arch/arm/mach-ux500/clock.c
+++ b/arch/arm/mach-ux500/clock.c
@@ -635,16 +635,13 @@ static const struct file_operations set_rate_fops = {
635static struct dentry *clk_debugfs_register_dir(struct clk *c, 635static struct dentry *clk_debugfs_register_dir(struct clk *c,
636 struct dentry *p_dentry) 636 struct dentry *p_dentry)
637{ 637{
638 struct dentry *d, *clk_d, *child, *child_tmp; 638 struct dentry *d, *clk_d;
639 char s[255]; 639 const char *p = c->name;
640 char *p = s;
641 640
642 if (c->name == NULL) 641 if (!p)
643 p += sprintf(p, "BUG"); 642 p = "BUG";
644 else
645 p += sprintf(p, "%s", c->name);
646 643
647 clk_d = debugfs_create_dir(s, p_dentry); 644 clk_d = debugfs_create_dir(p, p_dentry);
648 if (!clk_d) 645 if (!clk_d)
649 return NULL; 646 return NULL;
650 647
@@ -666,24 +663,10 @@ static struct dentry *clk_debugfs_register_dir(struct clk *c,
666 return clk_d; 663 return clk_d;
667 664
668err_out: 665err_out:
669 d = clk_d; 666 debugfs_remove_recursive(clk_d);
670 list_for_each_entry_safe(child, child_tmp, &d->d_subdirs, d_u.d_child)
671 debugfs_remove(child);
672 debugfs_remove(clk_d);
673 return NULL; 667 return NULL;
674} 668}
675 669
676static void clk_debugfs_remove_dir(struct dentry *cdentry)
677{
678 struct dentry *d, *child, *child_tmp;
679
680 d = cdentry;
681 list_for_each_entry_safe(child, child_tmp, &d->d_subdirs, d_u.d_child)
682 debugfs_remove(child);
683 debugfs_remove(cdentry);
684 return ;
685}
686
687static int clk_debugfs_register_one(struct clk *c) 670static int clk_debugfs_register_one(struct clk *c)
688{ 671{
689 struct clk *pa = c->parent_periph; 672 struct clk *pa = c->parent_periph;
@@ -700,7 +683,7 @@ static int clk_debugfs_register_one(struct clk *c)
700 c->dent_bus = clk_debugfs_register_dir(c, 683 c->dent_bus = clk_debugfs_register_dir(c,
701 bpa->dent_bus ? bpa->dent_bus : bpa->dent); 684 bpa->dent_bus ? bpa->dent_bus : bpa->dent);
702 if ((!c->dent_bus) && (c->dent)) { 685 if ((!c->dent_bus) && (c->dent)) {
703 clk_debugfs_remove_dir(c->dent); 686 debugfs_remove_recursive(c->dent);
704 c->dent = NULL; 687 c->dent = NULL;
705 return -ENOMEM; 688 return -ENOMEM;
706 } 689 }
diff --git a/arch/arm/mach-vt8500/irq.c b/arch/arm/mach-vt8500/irq.c
index 245140c0df10..642de0408f25 100644
--- a/arch/arm/mach-vt8500/irq.c
+++ b/arch/arm/mach-vt8500/irq.c
@@ -39,9 +39,10 @@
39static void __iomem *ic_regbase; 39static void __iomem *ic_regbase;
40static void __iomem *sic_regbase; 40static void __iomem *sic_regbase;
41 41
42static void vt8500_irq_mask(unsigned int irq) 42static void vt8500_irq_mask(struct irq_data *d)
43{ 43{
44 void __iomem *base = ic_regbase; 44 void __iomem *base = ic_regbase;
45 unsigned irq = d->irq;
45 u8 edge; 46 u8 edge;
46 47
47 if (irq >= 64) { 48 if (irq >= 64) {
@@ -64,9 +65,10 @@ static void vt8500_irq_mask(unsigned int irq)
64 } 65 }
65} 66}
66 67
67static void vt8500_irq_unmask(unsigned int irq) 68static void vt8500_irq_unmask(struct irq_data *d)
68{ 69{
69 void __iomem *base = ic_regbase; 70 void __iomem *base = ic_regbase;
71 unsigned irq = d->irq;
70 u8 dctr; 72 u8 dctr;
71 73
72 if (irq >= 64) { 74 if (irq >= 64) {
@@ -78,10 +80,11 @@ static void vt8500_irq_unmask(unsigned int irq)
78 writeb(dctr, base + VT8500_IC_DCTR + irq); 80 writeb(dctr, base + VT8500_IC_DCTR + irq);
79} 81}
80 82
81static int vt8500_irq_set_type(unsigned int irq, unsigned int flow_type) 83static int vt8500_irq_set_type(struct irq_data *d, unsigned int flow_type)
82{ 84{
83 void __iomem *base = ic_regbase; 85 void __iomem *base = ic_regbase;
84 unsigned int orig_irq = irq; 86 unsigned irq = d->irq;
87 unsigned orig_irq = irq;
85 u8 dctr; 88 u8 dctr;
86 89
87 if (irq >= 64) { 90 if (irq >= 64) {
@@ -114,11 +117,11 @@ static int vt8500_irq_set_type(unsigned int irq, unsigned int flow_type)
114} 117}
115 118
116static struct irq_chip vt8500_irq_chip = { 119static struct irq_chip vt8500_irq_chip = {
117 .name = "vt8500", 120 .name = "vt8500",
118 .ack = vt8500_irq_mask, 121 .irq_ack = vt8500_irq_mask,
119 .mask = vt8500_irq_mask, 122 .irq_mask = vt8500_irq_mask,
120 .unmask = vt8500_irq_unmask, 123 .irq_unmask = vt8500_irq_unmask,
121 .set_type = vt8500_irq_set_type, 124 .irq_set_type = vt8500_irq_set_type,
122}; 125};
123 126
124void __init vt8500_init_irq(void) 127void __init vt8500_init_irq(void)
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index ef59099a5463..44c086710d2b 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -120,17 +120,22 @@ static void l2x0_cache_sync(void)
120 spin_unlock_irqrestore(&l2x0_lock, flags); 120 spin_unlock_irqrestore(&l2x0_lock, flags);
121} 121}
122 122
123static void l2x0_flush_all(void) 123static void __l2x0_flush_all(void)
124{ 124{
125 unsigned long flags;
126
127 /* clean all ways */
128 spin_lock_irqsave(&l2x0_lock, flags);
129 debug_writel(0x03); 125 debug_writel(0x03);
130 writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY); 126 writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY);
131 cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask); 127 cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask);
132 cache_sync(); 128 cache_sync();
133 debug_writel(0x00); 129 debug_writel(0x00);
130}
131
132static void l2x0_flush_all(void)
133{
134 unsigned long flags;
135
136 /* clean all ways */
137 spin_lock_irqsave(&l2x0_lock, flags);
138 __l2x0_flush_all();
134 spin_unlock_irqrestore(&l2x0_lock, flags); 139 spin_unlock_irqrestore(&l2x0_lock, flags);
135} 140}
136 141
@@ -266,7 +271,9 @@ static void l2x0_disable(void)
266 unsigned long flags; 271 unsigned long flags;
267 272
268 spin_lock_irqsave(&l2x0_lock, flags); 273 spin_lock_irqsave(&l2x0_lock, flags);
269 writel(0, l2x0_base + L2X0_CTRL); 274 __l2x0_flush_all();
275 writel_relaxed(0, l2x0_base + L2X0_CTRL);
276 dsb();
270 spin_unlock_irqrestore(&l2x0_lock, flags); 277 spin_unlock_irqrestore(&l2x0_lock, flags);
271} 278}
272 279
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index bc0e1d88fd3b..9ea4f7ddd665 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -318,11 +318,11 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
318 fault = __do_page_fault(mm, addr, fsr, tsk); 318 fault = __do_page_fault(mm, addr, fsr, tsk);
319 up_read(&mm->mmap_sem); 319 up_read(&mm->mmap_sem);
320 320
321 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, addr); 321 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
322 if (fault & VM_FAULT_MAJOR) 322 if (fault & VM_FAULT_MAJOR)
323 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, regs, addr); 323 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, addr);
324 else if (fault & VM_FAULT_MINOR) 324 else if (fault & VM_FAULT_MINOR)
325 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, regs, addr); 325 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, addr);
326 326
327 /* 327 /*
328 * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR 328 * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 9d9e736c2b4f..594d677b92c8 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -759,7 +759,7 @@ early_param("vmalloc", early_vmalloc);
759 759
760static phys_addr_t lowmem_limit __initdata = 0; 760static phys_addr_t lowmem_limit __initdata = 0;
761 761
762static void __init sanity_check_meminfo(void) 762void __init sanity_check_meminfo(void)
763{ 763{
764 int i, j, highmem = 0; 764 int i, j, highmem = 0;
765 765
@@ -1032,8 +1032,9 @@ void __init paging_init(struct machine_desc *mdesc)
1032{ 1032{
1033 void *zero_page; 1033 void *zero_page;
1034 1034
1035 memblock_set_current_limit(lowmem_limit);
1036
1035 build_mem_type_table(); 1037 build_mem_type_table();
1036 sanity_check_meminfo();
1037 prepare_page_table(); 1038 prepare_page_table();
1038 map_lowmem(); 1039 map_lowmem();
1039 devicemaps_init(mdesc); 1040 devicemaps_init(mdesc);
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 687d02319a41..941a98c9e8aa 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -27,6 +27,10 @@ void __init arm_mm_memblock_reserve(void)
27 memblock_reserve(CONFIG_VECTORS_BASE, PAGE_SIZE); 27 memblock_reserve(CONFIG_VECTORS_BASE, PAGE_SIZE);
28} 28}
29 29
30void __init sanity_check_meminfo(void)
31{
32}
33
30/* 34/*
31 * paging_init() sets up the page tables, initialises the zone memory 35 * paging_init() sets up the page tables, initialises the zone memory
32 * maps, and sets up the zero page, bad page and bad page tables. 36 * maps, and sets up the zero page, bad page and bad page tables.
diff --git a/arch/arm/plat-mxc/Makefile b/arch/arm/plat-mxc/Makefile
index a1387875a491..d53c35fe2ea7 100644
--- a/arch/arm/plat-mxc/Makefile
+++ b/arch/arm/plat-mxc/Makefile
@@ -3,7 +3,7 @@
3# 3#
4 4
5# Common support 5# Common support
6obj-y := clock.o gpio.o time.o devices.o cpu.o system.o irq-common.o 6obj-y := clock.o time.o devices.o cpu.o system.o irq-common.o
7 7
8# MX51 uses the TZIC interrupt controller, older platforms use AVIC 8# MX51 uses the TZIC interrupt controller, older platforms use AVIC
9obj-$(CONFIG_MXC_TZIC) += tzic.o 9obj-$(CONFIG_MXC_TZIC) += tzic.o
diff --git a/arch/arm/plat-mxc/devices.c b/arch/arm/plat-mxc/devices.c
index eee1b6096a08..fb166b20f60f 100644
--- a/arch/arm/plat-mxc/devices.c
+++ b/arch/arm/plat-mxc/devices.c
@@ -89,3 +89,14 @@ err:
89 89
90 return pdev; 90 return pdev;
91} 91}
92
93struct device mxc_aips_bus = {
94 .init_name = "mxc_aips",
95 .parent = &platform_bus,
96};
97
98static int __init mxc_device_init(void)
99{
100 return device_register(&mxc_aips_bus);
101}
102core_initcall(mxc_device_init);
diff --git a/arch/arm/plat-mxc/devices/Makefile b/arch/arm/plat-mxc/devices/Makefile
index ad2922acf480..b41bf972b54b 100644
--- a/arch/arm/plat-mxc/devices/Makefile
+++ b/arch/arm/plat-mxc/devices/Makefile
@@ -2,6 +2,7 @@ obj-$(CONFIG_IMX_HAVE_PLATFORM_FEC) += platform-fec.o
2obj-$(CONFIG_IMX_HAVE_PLATFORM_FLEXCAN) += platform-flexcan.o 2obj-$(CONFIG_IMX_HAVE_PLATFORM_FLEXCAN) += platform-flexcan.o
3obj-$(CONFIG_IMX_HAVE_PLATFORM_FSL_USB2_UDC) += platform-fsl-usb2-udc.o 3obj-$(CONFIG_IMX_HAVE_PLATFORM_FSL_USB2_UDC) += platform-fsl-usb2-udc.o
4obj-$(CONFIG_IMX_HAVE_PLATFORM_GPIO_KEYS) += platform-gpio_keys.o 4obj-$(CONFIG_IMX_HAVE_PLATFORM_GPIO_KEYS) += platform-gpio_keys.o
5obj-y += platform-gpio-mxc.o
5obj-$(CONFIG_IMX_HAVE_PLATFORM_IMX21_HCD) += platform-imx21-hcd.o 6obj-$(CONFIG_IMX_HAVE_PLATFORM_IMX21_HCD) += platform-imx21-hcd.o
6obj-$(CONFIG_IMX_HAVE_PLATFORM_IMX2_WDT) += platform-imx2-wdt.o 7obj-$(CONFIG_IMX_HAVE_PLATFORM_IMX2_WDT) += platform-imx2-wdt.o
7obj-$(CONFIG_IMX_HAVE_PLATFORM_IMXDI_RTC) += platform-imxdi_rtc.o 8obj-$(CONFIG_IMX_HAVE_PLATFORM_IMXDI_RTC) += platform-imxdi_rtc.o
diff --git a/arch/arm/plat-mxc/devices/platform-fec.c b/arch/arm/plat-mxc/devices/platform-fec.c
index ccc789e21daa..4fc6ffc2a13e 100644
--- a/arch/arm/plat-mxc/devices/platform-fec.c
+++ b/arch/arm/plat-mxc/devices/platform-fec.c
@@ -6,6 +6,7 @@
6 * the terms of the GNU General Public License version 2 as published by the 6 * the terms of the GNU General Public License version 2 as published by the
7 * Free Software Foundation. 7 * Free Software Foundation.
8 */ 8 */
9#include <linux/dma-mapping.h>
9#include <asm/sizes.h> 10#include <asm/sizes.h>
10#include <mach/hardware.h> 11#include <mach/hardware.h>
11#include <mach/devices-common.h> 12#include <mach/devices-common.h>
diff --git a/arch/arm/plat-mxc/devices/platform-fsl-usb2-udc.c b/arch/arm/plat-mxc/devices/platform-fsl-usb2-udc.c
index 59c33f6e401c..23ce08e6ffd2 100644
--- a/arch/arm/plat-mxc/devices/platform-fsl-usb2-udc.c
+++ b/arch/arm/plat-mxc/devices/platform-fsl-usb2-udc.c
@@ -6,6 +6,7 @@
6 * the terms of the GNU General Public License version 2 as published by the 6 * the terms of the GNU General Public License version 2 as published by the
7 * Free Software Foundation. 7 * Free Software Foundation.
8 */ 8 */
9#include <linux/dma-mapping.h>
9#include <mach/hardware.h> 10#include <mach/hardware.h>
10#include <mach/devices-common.h> 11#include <mach/devices-common.h>
11 12
diff --git a/arch/arm/plat-mxc/devices/platform-gpio-mxc.c b/arch/arm/plat-mxc/devices/platform-gpio-mxc.c
new file mode 100644
index 000000000000..a7919a241032
--- /dev/null
+++ b/arch/arm/plat-mxc/devices/platform-gpio-mxc.c
@@ -0,0 +1,32 @@
1/*
2 * Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved.
3 * Copyright 2011 Linaro Limited
4 *
5 * This program is free software; you can redistribute it and/or modify it under
6 * the terms of the GNU General Public License version 2 as published by the
7 * Free Software Foundation.
8 */
9#include <mach/devices-common.h>
10
11struct platform_device *__init mxc_register_gpio(char *name, int id,
12 resource_size_t iobase, resource_size_t iosize, int irq, int irq_high)
13{
14 struct resource res[] = {
15 {
16 .start = iobase,
17 .end = iobase + iosize - 1,
18 .flags = IORESOURCE_MEM,
19 }, {
20 .start = irq,
21 .end = irq,
22 .flags = IORESOURCE_IRQ,
23 }, {
24 .start = irq_high,
25 .end = irq_high,
26 .flags = IORESOURCE_IRQ,
27 },
28 };
29
30 return platform_device_register_resndata(&mxc_aips_bus,
31 name, id, res, ARRAY_SIZE(res), NULL, 0);
32}
diff --git a/arch/arm/plat-mxc/devices/platform-imx-fb.c b/arch/arm/plat-mxc/devices/platform-imx-fb.c
index 79a1cb18a5b0..2b0b5e0aa998 100644
--- a/arch/arm/plat-mxc/devices/platform-imx-fb.c
+++ b/arch/arm/plat-mxc/devices/platform-imx-fb.c
@@ -6,6 +6,7 @@
6 * the terms of the GNU General Public License version 2 as published by the 6 * the terms of the GNU General Public License version 2 as published by the
7 * Free Software Foundation. 7 * Free Software Foundation.
8 */ 8 */
9#include <linux/dma-mapping.h>
9#include <mach/hardware.h> 10#include <mach/hardware.h>
10#include <mach/devices-common.h> 11#include <mach/devices-common.h>
11 12
diff --git a/arch/arm/plat-mxc/devices/platform-ipu-core.c b/arch/arm/plat-mxc/devices/platform-ipu-core.c
index edf65034aea5..79d340ae0af1 100644
--- a/arch/arm/plat-mxc/devices/platform-ipu-core.c
+++ b/arch/arm/plat-mxc/devices/platform-ipu-core.c
@@ -6,6 +6,7 @@
6 * the terms of the GNU General Public License version 2 as published by the 6 * the terms of the GNU General Public License version 2 as published by the
7 * Free Software Foundation. 7 * Free Software Foundation.
8 */ 8 */
9#include <linux/dma-mapping.h>
9#include <mach/hardware.h> 10#include <mach/hardware.h>
10#include <mach/devices-common.h> 11#include <mach/devices-common.h>
11 12
diff --git a/arch/arm/plat-mxc/devices/platform-mxc-ehci.c b/arch/arm/plat-mxc/devices/platform-mxc-ehci.c
index cc488f4b6204..e1763e03e7cb 100644
--- a/arch/arm/plat-mxc/devices/platform-mxc-ehci.c
+++ b/arch/arm/plat-mxc/devices/platform-mxc-ehci.c
@@ -6,6 +6,7 @@
6 * the terms of the GNU General Public License version 2 as published by the 6 * the terms of the GNU General Public License version 2 as published by the
7 * Free Software Foundation. 7 * Free Software Foundation.
8 */ 8 */
9#include <linux/dma-mapping.h>
9#include <mach/hardware.h> 10#include <mach/hardware.h>
10#include <mach/devices-common.h> 11#include <mach/devices-common.h>
11 12
diff --git a/arch/arm/plat-mxc/devices/platform-mxc-mmc.c b/arch/arm/plat-mxc/devices/platform-mxc-mmc.c
index 90d762f6f93b..540d3a7d92df 100644
--- a/arch/arm/plat-mxc/devices/platform-mxc-mmc.c
+++ b/arch/arm/plat-mxc/devices/platform-mxc-mmc.c
@@ -6,6 +6,7 @@
6 * the terms of the GNU General Public License version 2 as published by the 6 * the terms of the GNU General Public License version 2 as published by the
7 * Free Software Foundation. 7 * Free Software Foundation.
8 */ 8 */
9#include <linux/dma-mapping.h>
9#include <mach/hardware.h> 10#include <mach/hardware.h>
10#include <mach/devices-common.h> 11#include <mach/devices-common.h>
11 12
diff --git a/arch/arm/plat-mxc/devices/platform-spi_imx.c b/arch/arm/plat-mxc/devices/platform-spi_imx.c
index f97eb3615b2c..9bfae8bd5b8d 100644
--- a/arch/arm/plat-mxc/devices/platform-spi_imx.c
+++ b/arch/arm/plat-mxc/devices/platform-spi_imx.c
@@ -40,9 +40,10 @@ const struct imx_spi_imx_data imx21_cspi_data[] __initconst = {
40#endif 40#endif
41 41
42#ifdef CONFIG_SOC_IMX25 42#ifdef CONFIG_SOC_IMX25
43/* i.mx25 has the i.mx35 type cspi */
43const struct imx_spi_imx_data imx25_cspi_data[] __initconst = { 44const struct imx_spi_imx_data imx25_cspi_data[] __initconst = {
44#define imx25_cspi_data_entry(_id, _hwid) \ 45#define imx25_cspi_data_entry(_id, _hwid) \
45 imx_spi_imx_data_entry(MX25, CSPI, "imx25-cspi", _id, _hwid, SZ_16K) 46 imx_spi_imx_data_entry(MX25, CSPI, "imx35-cspi", _id, _hwid, SZ_16K)
46 imx25_cspi_data_entry(0, 1), 47 imx25_cspi_data_entry(0, 1),
47 imx25_cspi_data_entry(1, 2), 48 imx25_cspi_data_entry(1, 2),
48 imx25_cspi_data_entry(2, 3), 49 imx25_cspi_data_entry(2, 3),
@@ -79,8 +80,9 @@ const struct imx_spi_imx_data imx35_cspi_data[] __initconst = {
79#endif /* ifdef CONFIG_SOC_IMX35 */ 80#endif /* ifdef CONFIG_SOC_IMX35 */
80 81
81#ifdef CONFIG_SOC_IMX51 82#ifdef CONFIG_SOC_IMX51
83/* i.mx51 has the i.mx35 type cspi */
82const struct imx_spi_imx_data imx51_cspi_data __initconst = 84const struct imx_spi_imx_data imx51_cspi_data __initconst =
83 imx_spi_imx_data_entry_single(MX51, CSPI, "imx51-cspi", 2, , SZ_4K); 85 imx_spi_imx_data_entry_single(MX51, CSPI, "imx35-cspi", 2, , SZ_4K);
84 86
85const struct imx_spi_imx_data imx51_ecspi_data[] __initconst = { 87const struct imx_spi_imx_data imx51_ecspi_data[] __initconst = {
86#define imx51_ecspi_data_entry(_id, _hwid) \ 88#define imx51_ecspi_data_entry(_id, _hwid) \
@@ -91,12 +93,14 @@ const struct imx_spi_imx_data imx51_ecspi_data[] __initconst = {
91#endif /* ifdef CONFIG_SOC_IMX51 */ 93#endif /* ifdef CONFIG_SOC_IMX51 */
92 94
93#ifdef CONFIG_SOC_IMX53 95#ifdef CONFIG_SOC_IMX53
96/* i.mx53 has the i.mx35 type cspi */
94const struct imx_spi_imx_data imx53_cspi_data __initconst = 97const struct imx_spi_imx_data imx53_cspi_data __initconst =
95 imx_spi_imx_data_entry_single(MX53, CSPI, "imx53-cspi", 0, , SZ_4K); 98 imx_spi_imx_data_entry_single(MX53, CSPI, "imx35-cspi", 0, , SZ_4K);
96 99
100/* i.mx53 has the i.mx51 type ecspi */
97const struct imx_spi_imx_data imx53_ecspi_data[] __initconst = { 101const struct imx_spi_imx_data imx53_ecspi_data[] __initconst = {
98#define imx53_ecspi_data_entry(_id, _hwid) \ 102#define imx53_ecspi_data_entry(_id, _hwid) \
99 imx_spi_imx_data_entry(MX53, ECSPI, "imx53-ecspi", _id, _hwid, SZ_4K) 103 imx_spi_imx_data_entry(MX53, ECSPI, "imx51-ecspi", _id, _hwid, SZ_4K)
100 imx53_ecspi_data_entry(0, 1), 104 imx53_ecspi_data_entry(0, 1),
101 imx53_ecspi_data_entry(1, 2), 105 imx53_ecspi_data_entry(1, 2),
102}; 106};
diff --git a/arch/arm/plat-mxc/gpio.c b/arch/arm/plat-mxc/gpio.c
deleted file mode 100644
index 6cd6d7f686f6..000000000000
--- a/arch/arm/plat-mxc/gpio.c
+++ /dev/null
@@ -1,361 +0,0 @@
1/*
2 * MXC GPIO support. (c) 2008 Daniel Mack <daniel@caiaq.de>
3 * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
4 *
5 * Based on code from Freescale,
6 * Copyright (C) 2004-2010 Freescale Semiconductor, Inc. All Rights Reserved.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version 2
11 * of the License, or (at your option) any later version.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
20 */
21
22#include <linux/init.h>
23#include <linux/interrupt.h>
24#include <linux/io.h>
25#include <linux/irq.h>
26#include <linux/gpio.h>
27#include <mach/hardware.h>
28#include <asm-generic/bug.h>
29
30static struct mxc_gpio_port *mxc_gpio_ports;
31static int gpio_table_size;
32
33#define cpu_is_mx1_mx2() (cpu_is_mx1() || cpu_is_mx2())
34
35#define GPIO_DR (cpu_is_mx1_mx2() ? 0x1c : 0x00)
36#define GPIO_GDIR (cpu_is_mx1_mx2() ? 0x00 : 0x04)
37#define GPIO_PSR (cpu_is_mx1_mx2() ? 0x24 : 0x08)
38#define GPIO_ICR1 (cpu_is_mx1_mx2() ? 0x28 : 0x0C)
39#define GPIO_ICR2 (cpu_is_mx1_mx2() ? 0x2C : 0x10)
40#define GPIO_IMR (cpu_is_mx1_mx2() ? 0x30 : 0x14)
41#define GPIO_ISR (cpu_is_mx1_mx2() ? 0x34 : 0x18)
42
43#define GPIO_INT_LOW_LEV (cpu_is_mx1_mx2() ? 0x3 : 0x0)
44#define GPIO_INT_HIGH_LEV (cpu_is_mx1_mx2() ? 0x2 : 0x1)
45#define GPIO_INT_RISE_EDGE (cpu_is_mx1_mx2() ? 0x0 : 0x2)
46#define GPIO_INT_FALL_EDGE (cpu_is_mx1_mx2() ? 0x1 : 0x3)
47#define GPIO_INT_NONE 0x4
48
49/* Note: This driver assumes 32 GPIOs are handled in one register */
50
51static void _clear_gpio_irqstatus(struct mxc_gpio_port *port, u32 index)
52{
53 __raw_writel(1 << index, port->base + GPIO_ISR);
54}
55
56static void _set_gpio_irqenable(struct mxc_gpio_port *port, u32 index,
57 int enable)
58{
59 u32 l;
60
61 l = __raw_readl(port->base + GPIO_IMR);
62 l = (l & (~(1 << index))) | (!!enable << index);
63 __raw_writel(l, port->base + GPIO_IMR);
64}
65
66static void gpio_ack_irq(struct irq_data *d)
67{
68 u32 gpio = irq_to_gpio(d->irq);
69 _clear_gpio_irqstatus(&mxc_gpio_ports[gpio / 32], gpio & 0x1f);
70}
71
72static void gpio_mask_irq(struct irq_data *d)
73{
74 u32 gpio = irq_to_gpio(d->irq);
75 _set_gpio_irqenable(&mxc_gpio_ports[gpio / 32], gpio & 0x1f, 0);
76}
77
78static void gpio_unmask_irq(struct irq_data *d)
79{
80 u32 gpio = irq_to_gpio(d->irq);
81 _set_gpio_irqenable(&mxc_gpio_ports[gpio / 32], gpio & 0x1f, 1);
82}
83
84static int mxc_gpio_get(struct gpio_chip *chip, unsigned offset);
85
86static int gpio_set_irq_type(struct irq_data *d, u32 type)
87{
88 u32 gpio = irq_to_gpio(d->irq);
89 struct mxc_gpio_port *port = &mxc_gpio_ports[gpio / 32];
90 u32 bit, val;
91 int edge;
92 void __iomem *reg = port->base;
93
94 port->both_edges &= ~(1 << (gpio & 31));
95 switch (type) {
96 case IRQ_TYPE_EDGE_RISING:
97 edge = GPIO_INT_RISE_EDGE;
98 break;
99 case IRQ_TYPE_EDGE_FALLING:
100 edge = GPIO_INT_FALL_EDGE;
101 break;
102 case IRQ_TYPE_EDGE_BOTH:
103 val = mxc_gpio_get(&port->chip, gpio & 31);
104 if (val) {
105 edge = GPIO_INT_LOW_LEV;
106 pr_debug("mxc: set GPIO %d to low trigger\n", gpio);
107 } else {
108 edge = GPIO_INT_HIGH_LEV;
109 pr_debug("mxc: set GPIO %d to high trigger\n", gpio);
110 }
111 port->both_edges |= 1 << (gpio & 31);
112 break;
113 case IRQ_TYPE_LEVEL_LOW:
114 edge = GPIO_INT_LOW_LEV;
115 break;
116 case IRQ_TYPE_LEVEL_HIGH:
117 edge = GPIO_INT_HIGH_LEV;
118 break;
119 default:
120 return -EINVAL;
121 }
122
123 reg += GPIO_ICR1 + ((gpio & 0x10) >> 2); /* lower or upper register */
124 bit = gpio & 0xf;
125 val = __raw_readl(reg) & ~(0x3 << (bit << 1));
126 __raw_writel(val | (edge << (bit << 1)), reg);
127 _clear_gpio_irqstatus(port, gpio & 0x1f);
128
129 return 0;
130}
131
132static void mxc_flip_edge(struct mxc_gpio_port *port, u32 gpio)
133{
134 void __iomem *reg = port->base;
135 u32 bit, val;
136 int edge;
137
138 reg += GPIO_ICR1 + ((gpio & 0x10) >> 2); /* lower or upper register */
139 bit = gpio & 0xf;
140 val = __raw_readl(reg);
141 edge = (val >> (bit << 1)) & 3;
142 val &= ~(0x3 << (bit << 1));
143 if (edge == GPIO_INT_HIGH_LEV) {
144 edge = GPIO_INT_LOW_LEV;
145 pr_debug("mxc: switch GPIO %d to low trigger\n", gpio);
146 } else if (edge == GPIO_INT_LOW_LEV) {
147 edge = GPIO_INT_HIGH_LEV;
148 pr_debug("mxc: switch GPIO %d to high trigger\n", gpio);
149 } else {
150 pr_err("mxc: invalid configuration for GPIO %d: %x\n",
151 gpio, edge);
152 return;
153 }
154 __raw_writel(val | (edge << (bit << 1)), reg);
155}
156
157/* handle 32 interrupts in one status register */
158static void mxc_gpio_irq_handler(struct mxc_gpio_port *port, u32 irq_stat)
159{
160 u32 gpio_irq_no_base = port->virtual_irq_start;
161
162 while (irq_stat != 0) {
163 int irqoffset = fls(irq_stat) - 1;
164
165 if (port->both_edges & (1 << irqoffset))
166 mxc_flip_edge(port, irqoffset);
167
168 generic_handle_irq(gpio_irq_no_base + irqoffset);
169
170 irq_stat &= ~(1 << irqoffset);
171 }
172}
173
174/* MX1 and MX3 has one interrupt *per* gpio port */
175static void mx3_gpio_irq_handler(u32 irq, struct irq_desc *desc)
176{
177 u32 irq_stat;
178 struct mxc_gpio_port *port = irq_get_handler_data(irq);
179
180 irq_stat = __raw_readl(port->base + GPIO_ISR) &
181 __raw_readl(port->base + GPIO_IMR);
182
183 mxc_gpio_irq_handler(port, irq_stat);
184}
185
186/* MX2 has one interrupt *for all* gpio ports */
187static void mx2_gpio_irq_handler(u32 irq, struct irq_desc *desc)
188{
189 int i;
190 u32 irq_msk, irq_stat;
191 struct mxc_gpio_port *port = irq_get_handler_data(irq);
192
193 /* walk through all interrupt status registers */
194 for (i = 0; i < gpio_table_size; i++) {
195 irq_msk = __raw_readl(port[i].base + GPIO_IMR);
196 if (!irq_msk)
197 continue;
198
199 irq_stat = __raw_readl(port[i].base + GPIO_ISR) & irq_msk;
200 if (irq_stat)
201 mxc_gpio_irq_handler(&port[i], irq_stat);
202 }
203}
204
205/*
206 * Set interrupt number "irq" in the GPIO as a wake-up source.
207 * While system is running, all registered GPIO interrupts need to have
208 * wake-up enabled. When system is suspended, only selected GPIO interrupts
209 * need to have wake-up enabled.
210 * @param irq interrupt source number
211 * @param enable enable as wake-up if equal to non-zero
212 * @return This function returns 0 on success.
213 */
214static int gpio_set_wake_irq(struct irq_data *d, u32 enable)
215{
216 u32 gpio = irq_to_gpio(d->irq);
217 u32 gpio_idx = gpio & 0x1F;
218 struct mxc_gpio_port *port = &mxc_gpio_ports[gpio / 32];
219
220 if (enable) {
221 if (port->irq_high && (gpio_idx >= 16))
222 enable_irq_wake(port->irq_high);
223 else
224 enable_irq_wake(port->irq);
225 } else {
226 if (port->irq_high && (gpio_idx >= 16))
227 disable_irq_wake(port->irq_high);
228 else
229 disable_irq_wake(port->irq);
230 }
231
232 return 0;
233}
234
235static struct irq_chip gpio_irq_chip = {
236 .name = "GPIO",
237 .irq_ack = gpio_ack_irq,
238 .irq_mask = gpio_mask_irq,
239 .irq_unmask = gpio_unmask_irq,
240 .irq_set_type = gpio_set_irq_type,
241 .irq_set_wake = gpio_set_wake_irq,
242};
243
244static void _set_gpio_direction(struct gpio_chip *chip, unsigned offset,
245 int dir)
246{
247 struct mxc_gpio_port *port =
248 container_of(chip, struct mxc_gpio_port, chip);
249 u32 l;
250 unsigned long flags;
251
252 spin_lock_irqsave(&port->lock, flags);
253 l = __raw_readl(port->base + GPIO_GDIR);
254 if (dir)
255 l |= 1 << offset;
256 else
257 l &= ~(1 << offset);
258 __raw_writel(l, port->base + GPIO_GDIR);
259 spin_unlock_irqrestore(&port->lock, flags);
260}
261
262static void mxc_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
263{
264 struct mxc_gpio_port *port =
265 container_of(chip, struct mxc_gpio_port, chip);
266 void __iomem *reg = port->base + GPIO_DR;
267 u32 l;
268 unsigned long flags;
269
270 spin_lock_irqsave(&port->lock, flags);
271 l = (__raw_readl(reg) & (~(1 << offset))) | (!!value << offset);
272 __raw_writel(l, reg);
273 spin_unlock_irqrestore(&port->lock, flags);
274}
275
276static int mxc_gpio_get(struct gpio_chip *chip, unsigned offset)
277{
278 struct mxc_gpio_port *port =
279 container_of(chip, struct mxc_gpio_port, chip);
280
281 return (__raw_readl(port->base + GPIO_PSR) >> offset) & 1;
282}
283
284static int mxc_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
285{
286 _set_gpio_direction(chip, offset, 0);
287 return 0;
288}
289
290static int mxc_gpio_direction_output(struct gpio_chip *chip,
291 unsigned offset, int value)
292{
293 mxc_gpio_set(chip, offset, value);
294 _set_gpio_direction(chip, offset, 1);
295 return 0;
296}
297
298/*
299 * This lock class tells lockdep that GPIO irqs are in a different
300 * category than their parents, so it won't report false recursion.
301 */
302static struct lock_class_key gpio_lock_class;
303
304int __init mxc_gpio_init(struct mxc_gpio_port *port, int cnt)
305{
306 int i, j;
307
308 /* save for local usage */
309 mxc_gpio_ports = port;
310 gpio_table_size = cnt;
311
312 printk(KERN_INFO "MXC GPIO hardware\n");
313
314 for (i = 0; i < cnt; i++) {
315 /* disable the interrupt and clear the status */
316 __raw_writel(0, port[i].base + GPIO_IMR);
317 __raw_writel(~0, port[i].base + GPIO_ISR);
318 for (j = port[i].virtual_irq_start;
319 j < port[i].virtual_irq_start + 32; j++) {
320 irq_set_lockdep_class(j, &gpio_lock_class);
321 irq_set_chip_and_handler(j, &gpio_irq_chip,
322 handle_level_irq);
323 set_irq_flags(j, IRQF_VALID);
324 }
325
326 /* register gpio chip */
327 port[i].chip.direction_input = mxc_gpio_direction_input;
328 port[i].chip.direction_output = mxc_gpio_direction_output;
329 port[i].chip.get = mxc_gpio_get;
330 port[i].chip.set = mxc_gpio_set;
331 port[i].chip.base = i * 32;
332 port[i].chip.ngpio = 32;
333
334 spin_lock_init(&port[i].lock);
335
336 /* its a serious configuration bug when it fails */
337 BUG_ON( gpiochip_add(&port[i].chip) < 0 );
338
339 if (cpu_is_mx1() || cpu_is_mx3() || cpu_is_mx25() || cpu_is_mx51()) {
340 /* setup one handler for each entry */
341 irq_set_chained_handler(port[i].irq,
342 mx3_gpio_irq_handler);
343 irq_set_handler_data(port[i].irq, &port[i]);
344 if (port[i].irq_high) {
345 /* setup handler for GPIO 16 to 31 */
346 irq_set_chained_handler(port[i].irq_high,
347 mx3_gpio_irq_handler);
348 irq_set_handler_data(port[i].irq_high,
349 &port[i]);
350 }
351 }
352 }
353
354 if (cpu_is_mx2()) {
355 /* setup one handler for all GPIO interrupts */
356 irq_set_chained_handler(port[0].irq, mx2_gpio_irq_handler);
357 irq_set_handler_data(port[0].irq, port);
358 }
359
360 return 0;
361}
diff --git a/arch/arm/plat-mxc/include/mach/common.h b/arch/arm/plat-mxc/include/mach/common.h
index da7991832af6..4e3d97890d69 100644
--- a/arch/arm/plat-mxc/include/mach/common.h
+++ b/arch/arm/plat-mxc/include/mach/common.h
@@ -43,6 +43,15 @@ extern void mx35_init_irq(void);
43extern void mx50_init_irq(void); 43extern void mx50_init_irq(void);
44extern void mx51_init_irq(void); 44extern void mx51_init_irq(void);
45extern void mx53_init_irq(void); 45extern void mx53_init_irq(void);
46extern void imx1_soc_init(void);
47extern void imx21_soc_init(void);
48extern void imx25_soc_init(void);
49extern void imx27_soc_init(void);
50extern void imx31_soc_init(void);
51extern void imx35_soc_init(void);
52extern void imx50_soc_init(void);
53extern void imx51_soc_init(void);
54extern void imx53_soc_init(void);
46extern void epit_timer_init(struct clk *timer_clk, void __iomem *base, int irq); 55extern void epit_timer_init(struct clk *timer_clk, void __iomem *base, int irq);
47extern void mxc_timer_init(struct clk *timer_clk, void __iomem *, int); 56extern void mxc_timer_init(struct clk *timer_clk, void __iomem *, int);
48extern int mx1_clocks_init(unsigned long fref); 57extern int mx1_clocks_init(unsigned long fref);
@@ -55,7 +64,8 @@ extern int mx51_clocks_init(unsigned long ckil, unsigned long osc,
55 unsigned long ckih1, unsigned long ckih2); 64 unsigned long ckih1, unsigned long ckih2);
56extern int mx53_clocks_init(unsigned long ckil, unsigned long osc, 65extern int mx53_clocks_init(unsigned long ckil, unsigned long osc,
57 unsigned long ckih1, unsigned long ckih2); 66 unsigned long ckih1, unsigned long ckih2);
58extern int mxc_register_gpios(void); 67extern struct platform_device *mxc_register_gpio(char *name, int id,
68 resource_size_t iobase, resource_size_t iosize, int irq, int irq_high);
59extern int mxc_register_device(struct platform_device *pdev, void *data); 69extern int mxc_register_device(struct platform_device *pdev, void *data);
60extern void mxc_set_cpu_type(unsigned int type); 70extern void mxc_set_cpu_type(unsigned int type);
61extern void mxc_arch_reset_init(void __iomem *); 71extern void mxc_arch_reset_init(void __iomem *);
diff --git a/arch/arm/plat-mxc/include/mach/devices-common.h b/arch/arm/plat-mxc/include/mach/devices-common.h
index fa8477337f91..03f626645374 100644
--- a/arch/arm/plat-mxc/include/mach/devices-common.h
+++ b/arch/arm/plat-mxc/include/mach/devices-common.h
@@ -10,6 +10,8 @@
10#include <linux/platform_device.h> 10#include <linux/platform_device.h>
11#include <linux/init.h> 11#include <linux/init.h>
12 12
13extern struct device mxc_aips_bus;
14
13struct platform_device *imx_add_platform_device_dmamask( 15struct platform_device *imx_add_platform_device_dmamask(
14 const char *name, int id, 16 const char *name, int id,
15 const struct resource *res, unsigned int num_resources, 17 const struct resource *res, unsigned int num_resources,
diff --git a/arch/arm/plat-mxc/include/mach/gpio.h b/arch/arm/plat-mxc/include/mach/gpio.h
index a2747f12813e..31c820c1b796 100644
--- a/arch/arm/plat-mxc/include/mach/gpio.h
+++ b/arch/arm/plat-mxc/include/mach/gpio.h
@@ -36,31 +36,4 @@
36#define gpio_to_irq(gpio) (MXC_GPIO_IRQ_START + (gpio)) 36#define gpio_to_irq(gpio) (MXC_GPIO_IRQ_START + (gpio))
37#define irq_to_gpio(irq) ((irq) - MXC_GPIO_IRQ_START) 37#define irq_to_gpio(irq) ((irq) - MXC_GPIO_IRQ_START)
38 38
39struct mxc_gpio_port {
40 void __iomem *base;
41 int irq;
42 int irq_high;
43 int virtual_irq_start;
44 struct gpio_chip chip;
45 u32 both_edges;
46 spinlock_t lock;
47};
48
49#define DEFINE_IMX_GPIO_PORT_IRQ_HIGH(soc, _id, _hwid, _irq, _irq_high) \
50 { \
51 .chip.label = "gpio-" #_id, \
52 .irq = _irq, \
53 .irq_high = _irq_high, \
54 .base = soc ## _IO_ADDRESS( \
55 soc ## _GPIO ## _hwid ## _BASE_ADDR), \
56 .virtual_irq_start = MXC_GPIO_IRQ_START + (_id) * 32, \
57 }
58
59#define DEFINE_IMX_GPIO_PORT_IRQ(soc, _id, _hwid, _irq) \
60 DEFINE_IMX_GPIO_PORT_IRQ_HIGH(soc, _id, _hwid, _irq, 0)
61#define DEFINE_IMX_GPIO_PORT(soc, _id, _hwid) \
62 DEFINE_IMX_GPIO_PORT_IRQ(soc, _id, _hwid, 0)
63
64int mxc_gpio_init(struct mxc_gpio_port*, int);
65
66#endif 39#endif
diff --git a/arch/arm/plat-mxc/include/mach/irqs.h b/arch/arm/plat-mxc/include/mach/irqs.h
index 35c89bcdf758..00e812bbd81d 100644
--- a/arch/arm/plat-mxc/include/mach/irqs.h
+++ b/arch/arm/plat-mxc/include/mach/irqs.h
@@ -11,6 +11,8 @@
11#ifndef __ASM_ARCH_MXC_IRQS_H__ 11#ifndef __ASM_ARCH_MXC_IRQS_H__
12#define __ASM_ARCH_MXC_IRQS_H__ 12#define __ASM_ARCH_MXC_IRQS_H__
13 13
14#include <asm-generic/gpio.h>
15
14/* 16/*
15 * SoCs with TZIC interrupt controller have 128 IRQs, those with AVIC have 64 17 * SoCs with TZIC interrupt controller have 128 IRQs, those with AVIC have 64
16 */ 18 */
@@ -22,30 +24,13 @@
22 24
23#define MXC_GPIO_IRQ_START MXC_INTERNAL_IRQS 25#define MXC_GPIO_IRQ_START MXC_INTERNAL_IRQS
24 26
25/* these are ordered by size to support multi-SoC kernels */
26#if defined CONFIG_SOC_IMX53
27#define MXC_GPIO_IRQS (32 * 7)
28#elif defined CONFIG_ARCH_MX2
29#define MXC_GPIO_IRQS (32 * 6)
30#elif defined CONFIG_SOC_IMX50
31#define MXC_GPIO_IRQS (32 * 6)
32#elif defined CONFIG_ARCH_MX1
33#define MXC_GPIO_IRQS (32 * 4)
34#elif defined CONFIG_ARCH_MX25
35#define MXC_GPIO_IRQS (32 * 4)
36#elif defined CONFIG_SOC_IMX51
37#define MXC_GPIO_IRQS (32 * 4)
38#elif defined CONFIG_ARCH_MX3
39#define MXC_GPIO_IRQS (32 * 3)
40#endif
41
42/* 27/*
43 * The next 16 interrupts are for board specific purposes. Since 28 * The next 16 interrupts are for board specific purposes. Since
44 * the kernel can only run on one machine at a time, we can re-use 29 * the kernel can only run on one machine at a time, we can re-use
45 * these. If you need more, increase MXC_BOARD_IRQS, but keep it 30 * these. If you need more, increase MXC_BOARD_IRQS, but keep it
46 * within sensible limits. 31 * within sensible limits.
47 */ 32 */
48#define MXC_BOARD_IRQ_START (MXC_INTERNAL_IRQS + MXC_GPIO_IRQS) 33#define MXC_BOARD_IRQ_START (MXC_INTERNAL_IRQS + ARCH_NR_GPIOS)
49 34
50#ifdef CONFIG_MACH_MX31ADS_WM1133_EV1 35#ifdef CONFIG_MACH_MX31ADS_WM1133_EV1
51#define MXC_BOARD_IRQS 80 36#define MXC_BOARD_IRQS 80
diff --git a/arch/arm/plat-nomadik/include/plat/ste_dma40.h b/arch/arm/plat-nomadik/include/plat/ste_dma40.h
index c44886062f8e..685c78716d95 100644
--- a/arch/arm/plat-nomadik/include/plat/ste_dma40.h
+++ b/arch/arm/plat-nomadik/include/plat/ste_dma40.h
@@ -10,6 +10,7 @@
10#define STE_DMA40_H 10#define STE_DMA40_H
11 11
12#include <linux/dmaengine.h> 12#include <linux/dmaengine.h>
13#include <linux/scatterlist.h>
13#include <linux/workqueue.h> 14#include <linux/workqueue.h>
14#include <linux/interrupt.h> 15#include <linux/interrupt.h>
15 16
diff --git a/arch/arm/plat-omap/clock.c b/arch/arm/plat-omap/clock.c
index c9122dd6ee8d..964704f40bbe 100644
--- a/arch/arm/plat-omap/clock.c
+++ b/arch/arm/plat-omap/clock.c
@@ -480,13 +480,10 @@ static struct dentry *clk_debugfs_root;
480static int clk_debugfs_register_one(struct clk *c) 480static int clk_debugfs_register_one(struct clk *c)
481{ 481{
482 int err; 482 int err;
483 struct dentry *d, *child, *child_tmp; 483 struct dentry *d;
484 struct clk *pa = c->parent; 484 struct clk *pa = c->parent;
485 char s[255];
486 char *p = s;
487 485
488 p += sprintf(p, "%s", c->name); 486 d = debugfs_create_dir(c->name, pa ? pa->dent : clk_debugfs_root);
489 d = debugfs_create_dir(s, pa ? pa->dent : clk_debugfs_root);
490 if (!d) 487 if (!d)
491 return -ENOMEM; 488 return -ENOMEM;
492 c->dent = d; 489 c->dent = d;
@@ -509,10 +506,7 @@ static int clk_debugfs_register_one(struct clk *c)
509 return 0; 506 return 0;
510 507
511err_out: 508err_out:
512 d = c->dent; 509 debugfs_remove_recursive(c->dent);
513 list_for_each_entry_safe(child, child_tmp, &d->d_subdirs, d_u.d_child)
514 debugfs_remove(child);
515 debugfs_remove(c->dent);
516 return err; 510 return err;
517} 511}
518 512
diff --git a/arch/arm/plat-omap/include/plat/gpio.h b/arch/arm/plat-omap/include/plat/gpio.h
index ec97e00cb581..91e8de3db085 100644
--- a/arch/arm/plat-omap/include/plat/gpio.h
+++ b/arch/arm/plat-omap/include/plat/gpio.h
@@ -174,12 +174,32 @@ struct omap_gpio_dev_attr {
174 bool dbck_flag; /* dbck required or not - True for OMAP3&4 */ 174 bool dbck_flag; /* dbck required or not - True for OMAP3&4 */
175}; 175};
176 176
177struct omap_gpio_reg_offs {
178 u16 revision;
179 u16 direction;
180 u16 datain;
181 u16 dataout;
182 u16 set_dataout;
183 u16 clr_dataout;
184 u16 irqstatus;
185 u16 irqstatus2;
186 u16 irqenable;
187 u16 set_irqenable;
188 u16 clr_irqenable;
189 u16 debounce;
190 u16 debounce_en;
191
192 bool irqenable_inv;
193};
194
177struct omap_gpio_platform_data { 195struct omap_gpio_platform_data {
178 u16 virtual_irq_start; 196 u16 virtual_irq_start;
179 int bank_type; 197 int bank_type;
180 int bank_width; /* GPIO bank width */ 198 int bank_width; /* GPIO bank width */
181 int bank_stride; /* Only needed for omap1 MPUIO */ 199 int bank_stride; /* Only needed for omap1 MPUIO */
182 bool dbck_flag; /* dbck required or not - True for OMAP3&4 */ 200 bool dbck_flag; /* dbck required or not - True for OMAP3&4 */
201
202 struct omap_gpio_reg_offs *regs;
183}; 203};
184 204
185/* TODO: Analyze removing gpio_bank_count usage from driver code */ 205/* TODO: Analyze removing gpio_bank_count usage from driver code */
diff --git a/arch/arm/plat-omap/include/plat/omap_device.h b/arch/arm/plat-omap/include/plat/omap_device.h
index e4c349ff9fd8..ee405b36df4b 100644
--- a/arch/arm/plat-omap/include/plat/omap_device.h
+++ b/arch/arm/plat-omap/include/plat/omap_device.h
@@ -44,6 +44,10 @@ extern struct device omap_device_parent;
44#define OMAP_DEVICE_STATE_IDLE 2 44#define OMAP_DEVICE_STATE_IDLE 2
45#define OMAP_DEVICE_STATE_SHUTDOWN 3 45#define OMAP_DEVICE_STATE_SHUTDOWN 3
46 46
47/* omap_device.flags values */
48#define OMAP_DEVICE_SUSPENDED BIT(0)
49#define OMAP_DEVICE_NO_IDLE_ON_SUSPEND BIT(1)
50
47/** 51/**
48 * struct omap_device - omap_device wrapper for platform_devices 52 * struct omap_device - omap_device wrapper for platform_devices
49 * @pdev: platform_device 53 * @pdev: platform_device
@@ -73,6 +77,7 @@ struct omap_device {
73 s8 pm_lat_level; 77 s8 pm_lat_level;
74 u8 hwmods_cnt; 78 u8 hwmods_cnt;
75 u8 _state; 79 u8 _state;
80 u8 flags;
76}; 81};
77 82
78/* Device driver interface (call via platform_data fn ptrs) */ 83/* Device driver interface (call via platform_data fn ptrs) */
@@ -117,6 +122,10 @@ int omap_device_enable_hwmods(struct omap_device *od);
117int omap_device_disable_clocks(struct omap_device *od); 122int omap_device_disable_clocks(struct omap_device *od);
118int omap_device_enable_clocks(struct omap_device *od); 123int omap_device_enable_clocks(struct omap_device *od);
119 124
125static inline void omap_device_disable_idle_on_suspend(struct omap_device *od)
126{
127 od->flags |= OMAP_DEVICE_NO_IDLE_ON_SUSPEND;
128}
120 129
121/* 130/*
122 * Entries should be kept in latency order ascending 131 * Entries should be kept in latency order ascending
diff --git a/arch/arm/plat-omap/omap_device.c b/arch/arm/plat-omap/omap_device.c
index 49fc0df0c21f..2526fa312b8a 100644
--- a/arch/arm/plat-omap/omap_device.c
+++ b/arch/arm/plat-omap/omap_device.c
@@ -537,6 +537,7 @@ int omap_early_device_register(struct omap_device *od)
537 return 0; 537 return 0;
538} 538}
539 539
540#ifdef CONFIG_PM_RUNTIME
540static int _od_runtime_suspend(struct device *dev) 541static int _od_runtime_suspend(struct device *dev)
541{ 542{
542 struct platform_device *pdev = to_platform_device(dev); 543 struct platform_device *pdev = to_platform_device(dev);
@@ -563,13 +564,55 @@ static int _od_runtime_resume(struct device *dev)
563 564
564 return pm_generic_runtime_resume(dev); 565 return pm_generic_runtime_resume(dev);
565} 566}
567#endif
566 568
567static struct dev_power_domain omap_device_power_domain = { 569#ifdef CONFIG_SUSPEND
570static int _od_suspend_noirq(struct device *dev)
571{
572 struct platform_device *pdev = to_platform_device(dev);
573 struct omap_device *od = to_omap_device(pdev);
574 int ret;
575
576 if (od->flags & OMAP_DEVICE_NO_IDLE_ON_SUSPEND)
577 return pm_generic_suspend_noirq(dev);
578
579 ret = pm_generic_suspend_noirq(dev);
580
581 if (!ret && !pm_runtime_status_suspended(dev)) {
582 if (pm_generic_runtime_suspend(dev) == 0) {
583 omap_device_idle(pdev);
584 od->flags |= OMAP_DEVICE_SUSPENDED;
585 }
586 }
587
588 return ret;
589}
590
591static int _od_resume_noirq(struct device *dev)
592{
593 struct platform_device *pdev = to_platform_device(dev);
594 struct omap_device *od = to_omap_device(pdev);
595
596 if (od->flags & OMAP_DEVICE_NO_IDLE_ON_SUSPEND)
597 return pm_generic_resume_noirq(dev);
598
599 if ((od->flags & OMAP_DEVICE_SUSPENDED) &&
600 !pm_runtime_status_suspended(dev)) {
601 od->flags &= ~OMAP_DEVICE_SUSPENDED;
602 omap_device_enable(pdev);
603 pm_generic_runtime_resume(dev);
604 }
605
606 return pm_generic_resume_noirq(dev);
607}
608#endif
609
610static struct dev_pm_domain omap_device_pm_domain = {
568 .ops = { 611 .ops = {
569 .runtime_suspend = _od_runtime_suspend, 612 SET_RUNTIME_PM_OPS(_od_runtime_suspend, _od_runtime_resume,
570 .runtime_idle = _od_runtime_idle, 613 _od_runtime_idle)
571 .runtime_resume = _od_runtime_resume,
572 USE_PLATFORM_PM_SLEEP_OPS 614 USE_PLATFORM_PM_SLEEP_OPS
615 SET_SYSTEM_SLEEP_PM_OPS(_od_suspend_noirq, _od_resume_noirq)
573 } 616 }
574}; 617};
575 618
@@ -586,7 +629,7 @@ int omap_device_register(struct omap_device *od)
586 pr_debug("omap_device: %s: registering\n", od->pdev.name); 629 pr_debug("omap_device: %s: registering\n", od->pdev.name);
587 630
588 od->pdev.dev.parent = &omap_device_parent; 631 od->pdev.dev.parent = &omap_device_parent;
589 od->pdev.dev.pwr_domain = &omap_device_power_domain; 632 od->pdev.dev.pm_domain = &omap_device_pm_domain;
590 return platform_device_register(&od->pdev); 633 return platform_device_register(&od->pdev);
591} 634}
592 635
diff --git a/arch/arm/plat-orion/gpio.c b/arch/arm/plat-orion/gpio.c
index 5b4fffab1eb4..41ab97ebe4cf 100644
--- a/arch/arm/plat-orion/gpio.c
+++ b/arch/arm/plat-orion/gpio.c
@@ -432,7 +432,7 @@ void __init orion_gpio_init(int gpio_base, int ngpio,
432 ct->regs.mask = ochip->mask_offset + GPIO_EDGE_MASK_OFF; 432 ct->regs.mask = ochip->mask_offset + GPIO_EDGE_MASK_OFF;
433 ct->regs.ack = GPIO_EDGE_CAUSE_OFF; 433 ct->regs.ack = GPIO_EDGE_CAUSE_OFF;
434 ct->type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING; 434 ct->type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
435 ct->chip.irq_ack = irq_gc_ack; 435 ct->chip.irq_ack = irq_gc_ack_clr_bit;
436 ct->chip.irq_mask = irq_gc_mask_clr_bit; 436 ct->chip.irq_mask = irq_gc_mask_clr_bit;
437 ct->chip.irq_unmask = irq_gc_mask_set_bit; 437 ct->chip.irq_unmask = irq_gc_mask_set_bit;
438 ct->chip.irq_set_type = gpio_irq_set_type; 438 ct->chip.irq_set_type = gpio_irq_set_type;
diff --git a/arch/arm/plat-pxa/gpio.c b/arch/arm/plat-pxa/gpio.c
index 48ebb9479b61..a11dc3670505 100644
--- a/arch/arm/plat-pxa/gpio.c
+++ b/arch/arm/plat-pxa/gpio.c
@@ -50,7 +50,7 @@ static inline void __iomem *gpio_chip_base(struct gpio_chip *c)
50 return container_of(c, struct pxa_gpio_chip, chip)->regbase; 50 return container_of(c, struct pxa_gpio_chip, chip)->regbase;
51} 51}
52 52
53static inline struct pxa_gpio_chip *gpio_to_chip(unsigned gpio) 53static inline struct pxa_gpio_chip *gpio_to_pxachip(unsigned gpio)
54{ 54{
55 return &pxa_gpio_chips[gpio_to_bank(gpio)]; 55 return &pxa_gpio_chips[gpio_to_bank(gpio)];
56} 56}
@@ -161,7 +161,7 @@ static int pxa_gpio_irq_type(struct irq_data *d, unsigned int type)
161 int gpio = irq_to_gpio(d->irq); 161 int gpio = irq_to_gpio(d->irq);
162 unsigned long gpdr, mask = GPIO_bit(gpio); 162 unsigned long gpdr, mask = GPIO_bit(gpio);
163 163
164 c = gpio_to_chip(gpio); 164 c = gpio_to_pxachip(gpio);
165 165
166 if (type == IRQ_TYPE_PROBE) { 166 if (type == IRQ_TYPE_PROBE) {
167 /* Don't mess with enabled GPIOs using preconfigured edges or 167 /* Don't mess with enabled GPIOs using preconfigured edges or
@@ -230,7 +230,7 @@ static void pxa_gpio_demux_handler(unsigned int irq, struct irq_desc *desc)
230static void pxa_ack_muxed_gpio(struct irq_data *d) 230static void pxa_ack_muxed_gpio(struct irq_data *d)
231{ 231{
232 int gpio = irq_to_gpio(d->irq); 232 int gpio = irq_to_gpio(d->irq);
233 struct pxa_gpio_chip *c = gpio_to_chip(gpio); 233 struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);
234 234
235 __raw_writel(GPIO_bit(gpio), c->regbase + GEDR_OFFSET); 235 __raw_writel(GPIO_bit(gpio), c->regbase + GEDR_OFFSET);
236} 236}
@@ -238,7 +238,7 @@ static void pxa_ack_muxed_gpio(struct irq_data *d)
238static void pxa_mask_muxed_gpio(struct irq_data *d) 238static void pxa_mask_muxed_gpio(struct irq_data *d)
239{ 239{
240 int gpio = irq_to_gpio(d->irq); 240 int gpio = irq_to_gpio(d->irq);
241 struct pxa_gpio_chip *c = gpio_to_chip(gpio); 241 struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);
242 uint32_t grer, gfer; 242 uint32_t grer, gfer;
243 243
244 c->irq_mask &= ~GPIO_bit(gpio); 244 c->irq_mask &= ~GPIO_bit(gpio);
@@ -252,7 +252,7 @@ static void pxa_mask_muxed_gpio(struct irq_data *d)
252static void pxa_unmask_muxed_gpio(struct irq_data *d) 252static void pxa_unmask_muxed_gpio(struct irq_data *d)
253{ 253{
254 int gpio = irq_to_gpio(d->irq); 254 int gpio = irq_to_gpio(d->irq);
255 struct pxa_gpio_chip *c = gpio_to_chip(gpio); 255 struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);
256 256
257 c->irq_mask |= GPIO_bit(gpio); 257 c->irq_mask |= GPIO_bit(gpio);
258 update_edge_detect(c); 258 update_edge_detect(c);
diff --git a/arch/arm/plat-pxa/include/plat/sdhci.h b/arch/arm/plat-pxa/include/plat/sdhci.h
deleted file mode 100644
index 1ab332e37d7d..000000000000
--- a/arch/arm/plat-pxa/include/plat/sdhci.h
+++ /dev/null
@@ -1,35 +0,0 @@
1/* linux/arch/arm/plat-pxa/include/plat/sdhci.h
2 *
3 * Copyright 2010 Marvell
4 * Zhangfei Gao <zhangfei.gao@marvell.com>
5 *
6 * PXA Platform - SDHCI platform data definitions
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#ifndef __PLAT_PXA_SDHCI_H
14#define __PLAT_PXA_SDHCI_H
15
16/* pxa specific flag */
17/* Require clock free running */
18#define PXA_FLAG_DISABLE_CLOCK_GATING (1<<0)
19
20/* Board design supports 8-bit data on SD/SDIO BUS */
21#define PXA_FLAG_SD_8_BIT_CAPABLE_SLOT (1<<2)
22
23/*
24 * struct pxa_sdhci_platdata() - Platform device data for PXA SDHCI
25 * @max_speed: the maximum speed supported
26 * @quirks: quirks of specific device
27 * @flags: flags for platform requirement
28 */
29struct sdhci_pxa_platdata {
30 unsigned int max_speed;
31 unsigned int quirks;
32 unsigned int flags;
33};
34
35#endif /* __PLAT_PXA_SDHCI_H */
diff --git a/arch/arm/plat-s3c24xx/dma.c b/arch/arm/plat-s3c24xx/dma.c
index 2abf9660bc6c..539bd0e3defd 100644
--- a/arch/arm/plat-s3c24xx/dma.c
+++ b/arch/arm/plat-s3c24xx/dma.c
@@ -712,7 +712,7 @@ static struct s3c2410_dma_chan *s3c2410_dma_map_channel(int channel);
712 * get control of an dma channel 712 * get control of an dma channel
713*/ 713*/
714 714
715int s3c2410_dma_request(unsigned int channel, 715int s3c2410_dma_request(enum dma_ch channel,
716 struct s3c2410_dma_client *client, 716 struct s3c2410_dma_client *client,
717 void *dev) 717 void *dev)
718{ 718{
@@ -783,7 +783,7 @@ EXPORT_SYMBOL(s3c2410_dma_request);
783 * allowed to go through. 783 * allowed to go through.
784*/ 784*/
785 785
786int s3c2410_dma_free(unsigned int channel, struct s3c2410_dma_client *client) 786int s3c2410_dma_free(enum dma_ch channel, struct s3c2410_dma_client *client)
787{ 787{
788 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); 788 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
789 unsigned long flags; 789 unsigned long flags;
@@ -974,7 +974,7 @@ static int s3c2410_dma_started(struct s3c2410_dma_chan *chan)
974} 974}
975 975
976int 976int
977s3c2410_dma_ctrl(unsigned int channel, enum s3c2410_chan_op op) 977s3c2410_dma_ctrl(enum dma_ch channel, enum s3c2410_chan_op op)
978{ 978{
979 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); 979 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
980 980
@@ -1021,23 +1021,19 @@ EXPORT_SYMBOL(s3c2410_dma_ctrl);
1021 * xfersize: size of unit in bytes (1,2,4) 1021 * xfersize: size of unit in bytes (1,2,4)
1022*/ 1022*/
1023 1023
1024int s3c2410_dma_config(unsigned int channel, 1024int s3c2410_dma_config(enum dma_ch channel,
1025 int xferunit) 1025 int xferunit)
1026{ 1026{
1027 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); 1027 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
1028 unsigned int dcon; 1028 unsigned int dcon;
1029 1029
1030 pr_debug("%s: chan=%d, xfer_unit=%d, dcon=%08x\n", 1030 pr_debug("%s: chan=%d, xfer_unit=%d\n", __func__, channel, xferunit);
1031 __func__, channel, xferunit, dcon);
1032 1031
1033 if (chan == NULL) 1032 if (chan == NULL)
1034 return -EINVAL; 1033 return -EINVAL;
1035 1034
1036 pr_debug("%s: Initial dcon is %08x\n", __func__, dcon);
1037
1038 dcon = chan->dcon & dma_sel.dcon_mask; 1035 dcon = chan->dcon & dma_sel.dcon_mask;
1039 1036 pr_debug("%s: dcon is %08x\n", __func__, dcon);
1040 pr_debug("%s: New dcon is %08x\n", __func__, dcon);
1041 1037
1042 switch (chan->req_ch) { 1038 switch (chan->req_ch) {
1043 case DMACH_I2S_IN: 1039 case DMACH_I2S_IN:
@@ -1104,7 +1100,7 @@ EXPORT_SYMBOL(s3c2410_dma_config);
1104 * devaddr: physical address of the source 1100 * devaddr: physical address of the source
1105*/ 1101*/
1106 1102
1107int s3c2410_dma_devconfig(unsigned int channel, 1103int s3c2410_dma_devconfig(enum dma_ch channel,
1108 enum s3c2410_dmasrc source, 1104 enum s3c2410_dmasrc source,
1109 unsigned long devaddr) 1105 unsigned long devaddr)
1110{ 1106{
@@ -1177,7 +1173,7 @@ EXPORT_SYMBOL(s3c2410_dma_devconfig);
1177 * returns the current transfer points for the dma source and destination 1173 * returns the current transfer points for the dma source and destination
1178*/ 1174*/
1179 1175
1180int s3c2410_dma_getposition(unsigned int channel, dma_addr_t *src, dma_addr_t *dst) 1176int s3c2410_dma_getposition(enum dma_ch channel, dma_addr_t *src, dma_addr_t *dst)
1181{ 1177{
1182 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); 1178 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
1183 1179
@@ -1235,7 +1231,7 @@ static void s3c2410_dma_resume_chan(struct s3c2410_dma_chan *cp)
1235 /* restore channel's hardware configuration */ 1231 /* restore channel's hardware configuration */
1236 1232
1237 if (!cp->in_use) 1233 if (!cp->in_use)
1238 return 0; 1234 return;
1239 1235
1240 printk(KERN_INFO "dma%d: restoring configuration\n", cp->number); 1236 printk(KERN_INFO "dma%d: restoring configuration\n", cp->number);
1241 1237
@@ -1246,8 +1242,6 @@ static void s3c2410_dma_resume_chan(struct s3c2410_dma_chan *cp)
1246 1242
1247 if (cp->map != NULL) 1243 if (cp->map != NULL)
1248 dma_sel.select(cp, cp->map); 1244 dma_sel.select(cp, cp->map);
1249
1250 return 0;
1251} 1245}
1252 1246
1253static void s3c2410_dma_resume(void) 1247static void s3c2410_dma_resume(void)
diff --git a/arch/arm/plat-s5p/irq-gpioint.c b/arch/arm/plat-s5p/irq-gpioint.c
index 135abda31c9a..327ab9f662e8 100644
--- a/arch/arm/plat-s5p/irq-gpioint.c
+++ b/arch/arm/plat-s5p/irq-gpioint.c
@@ -152,7 +152,7 @@ static __init int s5p_gpioint_add(struct s3c_gpio_chip *chip)
152 if (!gc) 152 if (!gc)
153 return -ENOMEM; 153 return -ENOMEM;
154 ct = gc->chip_types; 154 ct = gc->chip_types;
155 ct->chip.irq_ack = irq_gc_ack; 155 ct->chip.irq_ack = irq_gc_ack_set_bit;
156 ct->chip.irq_mask = irq_gc_mask_set_bit; 156 ct->chip.irq_mask = irq_gc_mask_set_bit;
157 ct->chip.irq_unmask = irq_gc_mask_clr_bit; 157 ct->chip.irq_unmask = irq_gc_mask_clr_bit;
158 ct->chip.irq_set_type = s5p_gpioint_set_type, 158 ct->chip.irq_set_type = s5p_gpioint_set_type,
diff --git a/arch/arm/plat-s5p/s5p-time.c b/arch/arm/plat-s5p/s5p-time.c
index 899a8cc011ff..612934c48b0d 100644
--- a/arch/arm/plat-s5p/s5p-time.c
+++ b/arch/arm/plat-s5p/s5p-time.c
@@ -370,11 +370,11 @@ static void __init s5p_clocksource_init(void)
370 370
371 clock_rate = clk_get_rate(tin_source); 371 clock_rate = clk_get_rate(tin_source);
372 372
373 init_sched_clock(&cd, s5p_update_sched_clock, 32, clock_rate);
374
375 s5p_time_setup(timer_source.source_id, TCNT_MAX); 373 s5p_time_setup(timer_source.source_id, TCNT_MAX);
376 s5p_time_start(timer_source.source_id, PERIODIC); 374 s5p_time_start(timer_source.source_id, PERIODIC);
377 375
376 init_sched_clock(&cd, s5p_update_sched_clock, 32, clock_rate);
377
378 if (clocksource_register_hz(&time_clocksource, clock_rate)) 378 if (clocksource_register_hz(&time_clocksource, clock_rate))
379 panic("%s: can't register clocksource\n", time_clocksource.name); 379 panic("%s: can't register clocksource\n", time_clocksource.name);
380} 380}
diff --git a/arch/arm/plat-samsung/clock.c b/arch/arm/plat-samsung/clock.c
index 772892826ffc..0c9f95d98561 100644
--- a/arch/arm/plat-samsung/clock.c
+++ b/arch/arm/plat-samsung/clock.c
@@ -458,7 +458,7 @@ static struct dentry *clk_debugfs_root;
458static int clk_debugfs_register_one(struct clk *c) 458static int clk_debugfs_register_one(struct clk *c)
459{ 459{
460 int err; 460 int err;
461 struct dentry *d, *child, *child_tmp; 461 struct dentry *d;
462 struct clk *pa = c->parent; 462 struct clk *pa = c->parent;
463 char s[255]; 463 char s[255];
464 char *p = s; 464 char *p = s;
@@ -488,10 +488,7 @@ static int clk_debugfs_register_one(struct clk *c)
488 return 0; 488 return 0;
489 489
490err_out: 490err_out:
491 d = c->dent; 491 debugfs_remove_recursive(c->dent);
492 list_for_each_entry_safe(child, child_tmp, &d->d_subdirs, d_u.d_child)
493 debugfs_remove(child);
494 debugfs_remove(c->dent);
495 return err; 492 return err;
496} 493}
497 494
diff --git a/arch/arm/plat-samsung/dma.c b/arch/arm/plat-samsung/dma.c
index cb459dd95459..6143aa147688 100644
--- a/arch/arm/plat-samsung/dma.c
+++ b/arch/arm/plat-samsung/dma.c
@@ -41,7 +41,7 @@ struct s3c2410_dma_chan *s3c_dma_lookup_channel(unsigned int channel)
41 * irq? 41 * irq?
42*/ 42*/
43 43
44int s3c2410_dma_set_opfn(unsigned int channel, s3c2410_dma_opfn_t rtn) 44int s3c2410_dma_set_opfn(enum dma_ch channel, s3c2410_dma_opfn_t rtn)
45{ 45{
46 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); 46 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
47 47
@@ -56,7 +56,7 @@ int s3c2410_dma_set_opfn(unsigned int channel, s3c2410_dma_opfn_t rtn)
56} 56}
57EXPORT_SYMBOL(s3c2410_dma_set_opfn); 57EXPORT_SYMBOL(s3c2410_dma_set_opfn);
58 58
59int s3c2410_dma_set_buffdone_fn(unsigned int channel, s3c2410_dma_cbfn_t rtn) 59int s3c2410_dma_set_buffdone_fn(enum dma_ch channel, s3c2410_dma_cbfn_t rtn)
60{ 60{
61 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); 61 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
62 62
@@ -71,7 +71,7 @@ int s3c2410_dma_set_buffdone_fn(unsigned int channel, s3c2410_dma_cbfn_t rtn)
71} 71}
72EXPORT_SYMBOL(s3c2410_dma_set_buffdone_fn); 72EXPORT_SYMBOL(s3c2410_dma_set_buffdone_fn);
73 73
74int s3c2410_dma_setflags(unsigned int channel, unsigned int flags) 74int s3c2410_dma_setflags(enum dma_ch channel, unsigned int flags)
75{ 75{
76 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); 76 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
77 77
diff --git a/arch/arm/plat-samsung/include/plat/devs.h b/arch/arm/plat-samsung/include/plat/devs.h
index 4af108ff4112..e3b31c26ac3e 100644
--- a/arch/arm/plat-samsung/include/plat/devs.h
+++ b/arch/arm/plat-samsung/include/plat/devs.h
@@ -12,6 +12,10 @@
12 * it under the terms of the GNU General Public License version 2 as 12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation. 13 * published by the Free Software Foundation.
14*/ 14*/
15
16#ifndef __PLAT_DEVS_H
17#define __PLAT_DEVS_H __FILE__
18
15#include <linux/platform_device.h> 19#include <linux/platform_device.h>
16 20
17struct s3c24xx_uart_resources { 21struct s3c24xx_uart_resources {
@@ -159,3 +163,5 @@ extern struct platform_device s3c_device_ac97;
159 */ 163 */
160extern void *s3c_set_platdata(void *pd, size_t pdsize, 164extern void *s3c_set_platdata(void *pd, size_t pdsize,
161 struct platform_device *pdev); 165 struct platform_device *pdev);
166
167#endif /* __PLAT_DEVS_H */
diff --git a/arch/arm/plat-samsung/include/plat/dma.h b/arch/arm/plat-samsung/include/plat/dma.h
index 2e8f8c6560d7..8c273b7a6f56 100644
--- a/arch/arm/plat-samsung/include/plat/dma.h
+++ b/arch/arm/plat-samsung/include/plat/dma.h
@@ -42,6 +42,7 @@ struct s3c2410_dma_client {
42}; 42};
43 43
44struct s3c2410_dma_chan; 44struct s3c2410_dma_chan;
45enum dma_ch;
45 46
46/* s3c2410_dma_cbfn_t 47/* s3c2410_dma_cbfn_t
47 * 48 *
@@ -62,7 +63,7 @@ typedef int (*s3c2410_dma_opfn_t)(struct s3c2410_dma_chan *,
62 * request a dma channel exclusivley 63 * request a dma channel exclusivley
63*/ 64*/
64 65
65extern int s3c2410_dma_request(unsigned int channel, 66extern int s3c2410_dma_request(enum dma_ch channel,
66 struct s3c2410_dma_client *, void *dev); 67 struct s3c2410_dma_client *, void *dev);
67 68
68 69
@@ -71,14 +72,14 @@ extern int s3c2410_dma_request(unsigned int channel,
71 * change the state of the dma channel 72 * change the state of the dma channel
72*/ 73*/
73 74
74extern int s3c2410_dma_ctrl(unsigned int channel, enum s3c2410_chan_op op); 75extern int s3c2410_dma_ctrl(enum dma_ch channel, enum s3c2410_chan_op op);
75 76
76/* s3c2410_dma_setflags 77/* s3c2410_dma_setflags
77 * 78 *
78 * set the channel's flags to a given state 79 * set the channel's flags to a given state
79*/ 80*/
80 81
81extern int s3c2410_dma_setflags(unsigned int channel, 82extern int s3c2410_dma_setflags(enum dma_ch channel,
82 unsigned int flags); 83 unsigned int flags);
83 84
84/* s3c2410_dma_free 85/* s3c2410_dma_free
@@ -86,7 +87,7 @@ extern int s3c2410_dma_setflags(unsigned int channel,
86 * free the dma channel (will also abort any outstanding operations) 87 * free the dma channel (will also abort any outstanding operations)
87*/ 88*/
88 89
89extern int s3c2410_dma_free(unsigned int channel, struct s3c2410_dma_client *); 90extern int s3c2410_dma_free(enum dma_ch channel, struct s3c2410_dma_client *);
90 91
91/* s3c2410_dma_enqueue 92/* s3c2410_dma_enqueue
92 * 93 *
@@ -95,7 +96,7 @@ extern int s3c2410_dma_free(unsigned int channel, struct s3c2410_dma_client *);
95 * drained before the buffer is given to the DMA system. 96 * drained before the buffer is given to the DMA system.
96*/ 97*/
97 98
98extern int s3c2410_dma_enqueue(unsigned int channel, void *id, 99extern int s3c2410_dma_enqueue(enum dma_ch channel, void *id,
99 dma_addr_t data, int size); 100 dma_addr_t data, int size);
100 101
101/* s3c2410_dma_config 102/* s3c2410_dma_config
@@ -103,14 +104,14 @@ extern int s3c2410_dma_enqueue(unsigned int channel, void *id,
103 * configure the dma channel 104 * configure the dma channel
104*/ 105*/
105 106
106extern int s3c2410_dma_config(unsigned int channel, int xferunit); 107extern int s3c2410_dma_config(enum dma_ch channel, int xferunit);
107 108
108/* s3c2410_dma_devconfig 109/* s3c2410_dma_devconfig
109 * 110 *
110 * configure the device we're talking to 111 * configure the device we're talking to
111*/ 112*/
112 113
113extern int s3c2410_dma_devconfig(unsigned int channel, 114extern int s3c2410_dma_devconfig(enum dma_ch channel,
114 enum s3c2410_dmasrc source, unsigned long devaddr); 115 enum s3c2410_dmasrc source, unsigned long devaddr);
115 116
116/* s3c2410_dma_getposition 117/* s3c2410_dma_getposition
@@ -118,10 +119,10 @@ extern int s3c2410_dma_devconfig(unsigned int channel,
118 * get the position that the dma transfer is currently at 119 * get the position that the dma transfer is currently at
119*/ 120*/
120 121
121extern int s3c2410_dma_getposition(unsigned int channel, 122extern int s3c2410_dma_getposition(enum dma_ch channel,
122 dma_addr_t *src, dma_addr_t *dest); 123 dma_addr_t *src, dma_addr_t *dest);
123 124
124extern int s3c2410_dma_set_opfn(unsigned int, s3c2410_dma_opfn_t rtn); 125extern int s3c2410_dma_set_opfn(enum dma_ch, s3c2410_dma_opfn_t rtn);
125extern int s3c2410_dma_set_buffdone_fn(unsigned int, s3c2410_dma_cbfn_t rtn); 126extern int s3c2410_dma_set_buffdone_fn(enum dma_ch, s3c2410_dma_cbfn_t rtn);
126 127
127 128
diff --git a/arch/arm/plat-samsung/include/plat/s3c64xx-spi.h b/arch/arm/plat-samsung/include/plat/s3c64xx-spi.h
index 0ffe34a21554..4c16fa3621bb 100644
--- a/arch/arm/plat-samsung/include/plat/s3c64xx-spi.h
+++ b/arch/arm/plat-samsung/include/plat/s3c64xx-spi.h
@@ -39,6 +39,7 @@ struct s3c64xx_spi_csinfo {
39 * @fifo_lvl_mask: All tx fifo_lvl fields start at offset-6 39 * @fifo_lvl_mask: All tx fifo_lvl fields start at offset-6
40 * @rx_lvl_offset: Depends on tx fifo_lvl field and bus number 40 * @rx_lvl_offset: Depends on tx fifo_lvl field and bus number
41 * @high_speed: If the controller supports HIGH_SPEED_EN bit 41 * @high_speed: If the controller supports HIGH_SPEED_EN bit
42 * @tx_st_done: Depends on tx fifo_lvl field
42 */ 43 */
43struct s3c64xx_spi_info { 44struct s3c64xx_spi_info {
44 int src_clk_nr; 45 int src_clk_nr;
@@ -53,6 +54,7 @@ struct s3c64xx_spi_info {
53 int fifo_lvl_mask; 54 int fifo_lvl_mask;
54 int rx_lvl_offset; 55 int rx_lvl_offset;
55 int high_speed; 56 int high_speed;
57 int tx_st_done;
56}; 58};
57 59
58/** 60/**
diff --git a/arch/arm/plat-samsung/irq-uart.c b/arch/arm/plat-samsung/irq-uart.c
index 32582c0958e3..657405c481d0 100644
--- a/arch/arm/plat-samsung/irq-uart.c
+++ b/arch/arm/plat-samsung/irq-uart.c
@@ -54,8 +54,15 @@ static void __init s3c_init_uart_irq(struct s3c_uart_irq *uirq)
54 54
55 gc = irq_alloc_generic_chip("s3c-uart", 1, uirq->base_irq, reg_base, 55 gc = irq_alloc_generic_chip("s3c-uart", 1, uirq->base_irq, reg_base,
56 handle_level_irq); 56 handle_level_irq);
57
58 if (!gc) {
59 pr_err("%s: irq_alloc_generic_chip for IRQ %u failed\n",
60 __func__, uirq->base_irq);
61 return;
62 }
63
57 ct = gc->chip_types; 64 ct = gc->chip_types;
58 ct->chip.irq_ack = irq_gc_ack; 65 ct->chip.irq_ack = irq_gc_ack_set_bit;
59 ct->chip.irq_mask = irq_gc_mask_set_bit; 66 ct->chip.irq_mask = irq_gc_mask_set_bit;
60 ct->chip.irq_unmask = irq_gc_mask_clr_bit; 67 ct->chip.irq_unmask = irq_gc_mask_clr_bit;
61 ct->regs.ack = S3C64XX_UINTP; 68 ct->regs.ack = S3C64XX_UINTP;
diff --git a/arch/arm/plat-samsung/irq-vic-timer.c b/arch/arm/plat-samsung/irq-vic-timer.c
index a607546ddbd0..f714d060370d 100644
--- a/arch/arm/plat-samsung/irq-vic-timer.c
+++ b/arch/arm/plat-samsung/irq-vic-timer.c
@@ -54,6 +54,13 @@ void __init s3c_init_vic_timer_irq(unsigned int num, unsigned int timer_irq)
54 54
55 s3c_tgc = irq_alloc_generic_chip("s3c-timer", 1, timer_irq, 55 s3c_tgc = irq_alloc_generic_chip("s3c-timer", 1, timer_irq,
56 S3C64XX_TINT_CSTAT, handle_level_irq); 56 S3C64XX_TINT_CSTAT, handle_level_irq);
57
58 if (!s3c_tgc) {
59 pr_err("%s: irq_alloc_generic_chip for IRQ %d failed\n",
60 __func__, timer_irq);
61 return;
62 }
63
57 ct = s3c_tgc->chip_types; 64 ct = s3c_tgc->chip_types;
58 ct->chip.irq_mask = irq_gc_mask_clr_bit; 65 ct->chip.irq_mask = irq_gc_mask_clr_bit;
59 ct->chip.irq_unmask = irq_gc_mask_set_bit; 66 ct->chip.irq_unmask = irq_gc_mask_set_bit;
diff --git a/arch/arm/plat-spear/clock.c b/arch/arm/plat-spear/clock.c
index 6fa474cb398e..67dd00381ea6 100644
--- a/arch/arm/plat-spear/clock.c
+++ b/arch/arm/plat-spear/clock.c
@@ -916,7 +916,7 @@ static struct dentry *clk_debugfs_root;
916static int clk_debugfs_register_one(struct clk *c) 916static int clk_debugfs_register_one(struct clk *c)
917{ 917{
918 int err; 918 int err;
919 struct dentry *d, *child; 919 struct dentry *d;
920 struct clk *pa = c->pclk; 920 struct clk *pa = c->pclk;
921 char s[255]; 921 char s[255];
922 char *p = s; 922 char *p = s;
@@ -951,10 +951,7 @@ static int clk_debugfs_register_one(struct clk *c)
951 return 0; 951 return 0;
952 952
953err_out: 953err_out:
954 d = c->dent; 954 debugfs_remove_recursive(c->dent);
955 list_for_each_entry(child, &d->d_subdirs, d_u.d_child)
956 debugfs_remove(child);
957 debugfs_remove(c->dent);
958 return err; 955 return err;
959} 956}
960 957
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index d619b17c4413..c7476295de80 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -953,6 +953,16 @@ config BFIN_GPTIMERS
953 To compile this driver as a module, choose M here: the module 953 To compile this driver as a module, choose M here: the module
954 will be called gptimers. 954 will be called gptimers.
955 955
956config HAVE_PWM
957 tristate "Enable PWM API support"
958 depends on BFIN_GPTIMERS
959 help
960 Enable support for the Pulse Width Modulation framework (as
961 found in linux/pwm.h).
962
963 To compile this driver as a module, choose M here: the module
964 will be called pwm.
965
956choice 966choice
957 prompt "Uncached DMA region" 967 prompt "Uncached DMA region"
958 default DMA_UNCACHED_1M 968 default DMA_UNCACHED_1M
diff --git a/arch/blackfin/configs/BF561-EZKIT_defconfig b/arch/blackfin/configs/BF561-EZKIT_defconfig
index 1c0a82a10591..d7ff2aee3fbc 100644
--- a/arch/blackfin/configs/BF561-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF561-EZKIT_defconfig
@@ -58,13 +58,13 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
58CONFIG_MTD=y 58CONFIG_MTD=y
59CONFIG_MTD_PARTITIONS=y 59CONFIG_MTD_PARTITIONS=y
60CONFIG_MTD_CMDLINE_PARTS=y 60CONFIG_MTD_CMDLINE_PARTS=y
61CONFIG_MTD_CHAR=m 61CONFIG_MTD_CHAR=y
62CONFIG_MTD_BLOCK=y 62CONFIG_MTD_BLOCK=y
63CONFIG_MTD_CFI=m 63CONFIG_MTD_CFI=y
64CONFIG_MTD_CFI_AMDSTD=m 64CONFIG_MTD_CFI_AMDSTD=y
65CONFIG_MTD_RAM=y 65CONFIG_MTD_RAM=y
66CONFIG_MTD_ROM=m 66CONFIG_MTD_ROM=m
67CONFIG_MTD_PHYSMAP=m 67CONFIG_MTD_PHYSMAP=y
68CONFIG_BLK_DEV_RAM=y 68CONFIG_BLK_DEV_RAM=y
69CONFIG_NETDEVICES=y 69CONFIG_NETDEVICES=y
70CONFIG_NET_ETHERNET=y 70CONFIG_NET_ETHERNET=y
diff --git a/arch/blackfin/include/asm/Kbuild b/arch/blackfin/include/asm/Kbuild
index 9e7c5379d3ff..7a075eaf6041 100644
--- a/arch/blackfin/include/asm/Kbuild
+++ b/arch/blackfin/include/asm/Kbuild
@@ -1,5 +1,48 @@
1include include/asm-generic/Kbuild.asm 1include include/asm-generic/Kbuild.asm
2 2
3generic-y += auxvec.h
4generic-y += bitsperlong.h
5generic-y += bugs.h
6generic-y += cputime.h
7generic-y += current.h
8generic-y += device.h
9generic-y += div64.h
10generic-y += emergency-restart.h
11generic-y += errno.h
12generic-y += fb.h
13generic-y += futex.h
14generic-y += hw_irq.h
15generic-y += ioctl.h
16generic-y += ipcbuf.h
17generic-y += irq_regs.h
18generic-y += kdebug.h
19generic-y += kmap_types.h
20generic-y += local64.h
21generic-y += local.h
22generic-y += mman.h
23generic-y += msgbuf.h
24generic-y += param.h
25generic-y += percpu.h
26generic-y += pgalloc.h
27generic-y += resource.h
28generic-y += scatterlist.h
29generic-y += sembuf.h
30generic-y += serial.h
31generic-y += setup.h
32generic-y += shmbuf.h
33generic-y += shmparam.h
34generic-y += socket.h
35generic-y += sockios.h
36generic-y += statfs.h
37generic-y += termbits.h
38generic-y += termios.h
39generic-y += topology.h
40generic-y += types.h
41generic-y += ucontext.h
42generic-y += unaligned.h
43generic-y += user.h
44generic-y += xor.h
45
3header-y += bfin_sport.h 46header-y += bfin_sport.h
4header-y += cachectl.h 47header-y += cachectl.h
5header-y += fixed_code.h 48header-y += fixed_code.h
diff --git a/arch/blackfin/include/asm/atomic.h b/arch/blackfin/include/asm/atomic.h
index e48508957160..4c707dbe1ff9 100644
--- a/arch/blackfin/include/asm/atomic.h
+++ b/arch/blackfin/include/asm/atomic.h
@@ -1,8 +1,8 @@
1/* 1/*
2 * Copyright 2004-2009 Analog Devices Inc. 2 * Copyright 2004-2009 Analog Devices Inc.
3 * 3 *
4 * Licensed under the GPL-2 or later. 4 * Licensed under the GPL-2 or later.
5 */ 5 */
6 6
7#ifndef __ARCH_BLACKFIN_ATOMIC__ 7#ifndef __ARCH_BLACKFIN_ATOMIC__
8#define __ARCH_BLACKFIN_ATOMIC__ 8#define __ARCH_BLACKFIN_ATOMIC__
@@ -76,11 +76,6 @@ static inline void atomic_set_mask(int mask, atomic_t *v)
76 __raw_atomic_set_asm(&v->counter, mask); 76 __raw_atomic_set_asm(&v->counter, mask);
77} 77}
78 78
79static inline int atomic_test_mask(int mask, atomic_t *v)
80{
81 return __raw_atomic_test_asm(&v->counter, mask);
82}
83
84/* Atomic operations are already serializing */ 79/* Atomic operations are already serializing */
85#define smp_mb__before_atomic_dec() barrier() 80#define smp_mb__before_atomic_dec() barrier()
86#define smp_mb__after_atomic_dec() barrier() 81#define smp_mb__after_atomic_dec() barrier()
diff --git a/arch/blackfin/include/asm/auxvec.h b/arch/blackfin/include/asm/auxvec.h
deleted file mode 100644
index 41fa68b71287..000000000000
--- a/arch/blackfin/include/asm/auxvec.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/auxvec.h>
diff --git a/arch/blackfin/include/asm/bitsperlong.h b/arch/blackfin/include/asm/bitsperlong.h
deleted file mode 100644
index 6dc0bb0c13b2..000000000000
--- a/arch/blackfin/include/asm/bitsperlong.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/bitsperlong.h>
diff --git a/arch/blackfin/include/asm/blackfin.h b/arch/blackfin/include/asm/blackfin.h
index eb7c1441d8f9..0928700b6bc4 100644
--- a/arch/blackfin/include/asm/blackfin.h
+++ b/arch/blackfin/include/asm/blackfin.h
@@ -1,9 +1,9 @@
1/* 1/*
2 * Common header file for Blackfin family of processors. 2 * Common header file for Blackfin family of processors.
3 * 3 *
4 * Copyright 2004-2009 Analog Devices Inc. 4 * Copyright 2004-2009 Analog Devices Inc.
5 * 5 *
6 * Licensed under the GPL-2 or later. 6 * Licensed under the GPL-2 or later.
7 */ 7 */
8 8
9#ifndef _BLACKFIN_H_ 9#ifndef _BLACKFIN_H_
diff --git a/arch/blackfin/include/asm/bugs.h b/arch/blackfin/include/asm/bugs.h
deleted file mode 100644
index 61791e1ad9f5..000000000000
--- a/arch/blackfin/include/asm/bugs.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/bugs.h>
diff --git a/arch/blackfin/include/asm/cputime.h b/arch/blackfin/include/asm/cputime.h
deleted file mode 100644
index 6d68ad7e0ea3..000000000000
--- a/arch/blackfin/include/asm/cputime.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/cputime.h>
diff --git a/arch/blackfin/include/asm/current.h b/arch/blackfin/include/asm/current.h
deleted file mode 100644
index 4c51401b5537..000000000000
--- a/arch/blackfin/include/asm/current.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/current.h>
diff --git a/arch/blackfin/include/asm/device.h b/arch/blackfin/include/asm/device.h
deleted file mode 100644
index f0a4c256403b..000000000000
--- a/arch/blackfin/include/asm/device.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/device.h>
diff --git a/arch/blackfin/include/asm/div64.h b/arch/blackfin/include/asm/div64.h
deleted file mode 100644
index 6cd978cefb28..000000000000
--- a/arch/blackfin/include/asm/div64.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/div64.h>
diff --git a/arch/blackfin/include/asm/dpmc.h b/arch/blackfin/include/asm/dpmc.h
index edf2a2ad5183..c4ec959dad78 100644
--- a/arch/blackfin/include/asm/dpmc.h
+++ b/arch/blackfin/include/asm/dpmc.h
@@ -117,7 +117,6 @@
117#ifndef __ASSEMBLY__ 117#ifndef __ASSEMBLY__
118 118
119void sleep_mode(u32 sic_iwr0, u32 sic_iwr1, u32 sic_iwr2); 119void sleep_mode(u32 sic_iwr0, u32 sic_iwr1, u32 sic_iwr2);
120void hibernate_mode(u32 sic_iwr0, u32 sic_iwr1, u32 sic_iwr2);
121void sleep_deeper(u32 sic_iwr0, u32 sic_iwr1, u32 sic_iwr2); 120void sleep_deeper(u32 sic_iwr0, u32 sic_iwr1, u32 sic_iwr2);
122void do_hibernate(int wakeup); 121void do_hibernate(int wakeup);
123void set_dram_srfs(void); 122void set_dram_srfs(void);
@@ -134,32 +133,6 @@ struct bfin_dpmc_platform_data {
134 unsigned short vr_settling_time; /* in us */ 133 unsigned short vr_settling_time; /* in us */
135}; 134};
136 135
137#else
138
139#define PM_PUSH(x) \
140 R0 = [P0 + (x - SRAM_BASE_ADDRESS)];\
141 [--SP] = R0;\
142
143#define PM_POP(x) \
144 R0 = [SP++];\
145 [P0 + (x - SRAM_BASE_ADDRESS)] = R0;\
146
147#define PM_SYS_PUSH(x) \
148 R0 = [P0 + (x - PLL_CTL)];\
149 [--SP] = R0;\
150
151#define PM_SYS_POP(x) \
152 R0 = [SP++];\
153 [P0 + (x - PLL_CTL)] = R0;\
154
155#define PM_SYS_PUSH16(x) \
156 R0 = w[P0 + (x - PLL_CTL)];\
157 [--SP] = R0;\
158
159#define PM_SYS_POP16(x) \
160 R0 = [SP++];\
161 w[P0 + (x - PLL_CTL)] = R0;\
162
163#endif 136#endif
164 137
165#endif /*_BLACKFIN_DPMC_H_*/ 138#endif /*_BLACKFIN_DPMC_H_*/
diff --git a/arch/blackfin/include/asm/emergency-restart.h b/arch/blackfin/include/asm/emergency-restart.h
deleted file mode 100644
index 3711bd9d50bd..000000000000
--- a/arch/blackfin/include/asm/emergency-restart.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/emergency-restart.h>
diff --git a/arch/blackfin/include/asm/errno.h b/arch/blackfin/include/asm/errno.h
deleted file mode 100644
index 4c82b503d92f..000000000000
--- a/arch/blackfin/include/asm/errno.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/errno.h>
diff --git a/arch/blackfin/include/asm/fb.h b/arch/blackfin/include/asm/fb.h
deleted file mode 100644
index 3a4988e8df45..000000000000
--- a/arch/blackfin/include/asm/fb.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/fb.h>
diff --git a/arch/blackfin/include/asm/futex.h b/arch/blackfin/include/asm/futex.h
deleted file mode 100644
index 0b745828f42b..000000000000
--- a/arch/blackfin/include/asm/futex.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/futex.h>
diff --git a/arch/blackfin/include/asm/gpio.h b/arch/blackfin/include/asm/gpio.h
index 1ef8417f5d27..5a25856381ff 100644
--- a/arch/blackfin/include/asm/gpio.h
+++ b/arch/blackfin/include/asm/gpio.h
@@ -16,58 +16,13 @@
16 16
17#include <mach/gpio.h> 17#include <mach/gpio.h>
18 18
19#define GPIO_0 0
20#define GPIO_1 1
21#define GPIO_2 2
22#define GPIO_3 3
23#define GPIO_4 4
24#define GPIO_5 5
25#define GPIO_6 6
26#define GPIO_7 7
27#define GPIO_8 8
28#define GPIO_9 9
29#define GPIO_10 10
30#define GPIO_11 11
31#define GPIO_12 12
32#define GPIO_13 13
33#define GPIO_14 14
34#define GPIO_15 15
35#define GPIO_16 16
36#define GPIO_17 17
37#define GPIO_18 18
38#define GPIO_19 19
39#define GPIO_20 20
40#define GPIO_21 21
41#define GPIO_22 22
42#define GPIO_23 23
43#define GPIO_24 24
44#define GPIO_25 25
45#define GPIO_26 26
46#define GPIO_27 27
47#define GPIO_28 28
48#define GPIO_29 29
49#define GPIO_30 30
50#define GPIO_31 31
51#define GPIO_32 32
52#define GPIO_33 33
53#define GPIO_34 34
54#define GPIO_35 35
55#define GPIO_36 36
56#define GPIO_37 37
57#define GPIO_38 38
58#define GPIO_39 39
59#define GPIO_40 40
60#define GPIO_41 41
61#define GPIO_42 42
62#define GPIO_43 43
63#define GPIO_44 44
64#define GPIO_45 45
65#define GPIO_46 46
66#define GPIO_47 47
67
68#define PERIPHERAL_USAGE 1 19#define PERIPHERAL_USAGE 1
69#define GPIO_USAGE 0 20#define GPIO_USAGE 0
70 21
22#ifndef BFIN_GPIO_PINT
23# define BFIN_GPIO_PINT 0
24#endif
25
71#ifndef __ASSEMBLY__ 26#ifndef __ASSEMBLY__
72 27
73#include <linux/compiler.h> 28#include <linux/compiler.h>
@@ -89,7 +44,7 @@
89* MODIFICATION HISTORY : 44* MODIFICATION HISTORY :
90**************************************************************/ 45**************************************************************/
91 46
92#ifndef CONFIG_BF54x 47#if !BFIN_GPIO_PINT
93void set_gpio_dir(unsigned, unsigned short); 48void set_gpio_dir(unsigned, unsigned short);
94void set_gpio_inen(unsigned, unsigned short); 49void set_gpio_inen(unsigned, unsigned short);
95void set_gpio_polar(unsigned, unsigned short); 50void set_gpio_polar(unsigned, unsigned short);
@@ -164,6 +119,10 @@ struct gpio_port_t {
164#ifdef BFIN_SPECIAL_GPIO_BANKS 119#ifdef BFIN_SPECIAL_GPIO_BANKS
165void bfin_special_gpio_free(unsigned gpio); 120void bfin_special_gpio_free(unsigned gpio);
166int bfin_special_gpio_request(unsigned gpio, const char *label); 121int bfin_special_gpio_request(unsigned gpio, const char *label);
122# ifdef CONFIG_PM
123void bfin_special_gpio_pm_hibernate_restore(void);
124void bfin_special_gpio_pm_hibernate_suspend(void);
125# endif
167#endif 126#endif
168 127
169#ifdef CONFIG_PM 128#ifdef CONFIG_PM
@@ -182,7 +141,7 @@ static inline void bfin_pm_standby_restore(void)
182void bfin_gpio_pm_hibernate_restore(void); 141void bfin_gpio_pm_hibernate_restore(void);
183void bfin_gpio_pm_hibernate_suspend(void); 142void bfin_gpio_pm_hibernate_suspend(void);
184 143
185#ifndef CONFIG_BF54x 144# if !BFIN_GPIO_PINT
186int gpio_pm_wakeup_ctrl(unsigned gpio, unsigned ctrl); 145int gpio_pm_wakeup_ctrl(unsigned gpio, unsigned ctrl);
187 146
188struct gpio_port_s { 147struct gpio_port_s {
@@ -199,8 +158,9 @@ struct gpio_port_s {
199 unsigned short reserved; 158 unsigned short reserved;
200 unsigned short mux; 159 unsigned short mux;
201}; 160};
202#endif /*CONFIG_BF54x*/ 161# endif
203#endif /*CONFIG_PM*/ 162#endif /*CONFIG_PM*/
163
204/*********************************************************** 164/***********************************************************
205* 165*
206* FUNCTIONS: Blackfin GPIO Driver 166* FUNCTIONS: Blackfin GPIO Driver
diff --git a/arch/blackfin/include/asm/gptimers.h b/arch/blackfin/include/asm/gptimers.h
index 38657dac1235..38bddcb190c8 100644
--- a/arch/blackfin/include/asm/gptimers.h
+++ b/arch/blackfin/include/asm/gptimers.h
@@ -193,6 +193,16 @@ uint16_t get_enabled_gptimers(void);
193uint32_t get_gptimer_status(unsigned int group); 193uint32_t get_gptimer_status(unsigned int group);
194void set_gptimer_status(unsigned int group, uint32_t value); 194void set_gptimer_status(unsigned int group, uint32_t value);
195 195
196static inline void enable_gptimer(unsigned int timer_id)
197{
198 enable_gptimers(1 << timer_id);
199}
200
201static inline void disable_gptimer(unsigned int timer_id)
202{
203 disable_gptimers(1 << timer_id);
204}
205
196/* 206/*
197 * All Blackfin system MMRs are padded to 32bits even if the register 207 * All Blackfin system MMRs are padded to 32bits even if the register
198 * itself is only 16bits. So use a helper macro to streamline this. 208 * itself is only 16bits. So use a helper macro to streamline this.
@@ -209,6 +219,15 @@ struct bfin_gptimer_regs {
209 u32 width; 219 u32 width;
210}; 220};
211 221
222/*
223 * bfin group timer registers layout
224 */
225struct bfin_gptimer_group_regs {
226 __BFP(enable);
227 __BFP(disable);
228 u32 status;
229};
230
212#undef __BFP 231#undef __BFP
213 232
214#endif 233#endif
diff --git a/arch/blackfin/include/asm/hw_irq.h b/arch/blackfin/include/asm/hw_irq.h
deleted file mode 100644
index 1f5ef7da0045..000000000000
--- a/arch/blackfin/include/asm/hw_irq.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/hw_irq.h>
diff --git a/arch/blackfin/include/asm/ioctl.h b/arch/blackfin/include/asm/ioctl.h
deleted file mode 100644
index b279fe06dfe5..000000000000
--- a/arch/blackfin/include/asm/ioctl.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/ioctl.h>
diff --git a/arch/blackfin/include/asm/ipcbuf.h b/arch/blackfin/include/asm/ipcbuf.h
deleted file mode 100644
index 84c7e51cb6d0..000000000000
--- a/arch/blackfin/include/asm/ipcbuf.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/ipcbuf.h>
diff --git a/arch/blackfin/include/asm/irq_regs.h b/arch/blackfin/include/asm/irq_regs.h
deleted file mode 100644
index 3dd9c0b70270..000000000000
--- a/arch/blackfin/include/asm/irq_regs.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/irq_regs.h>
diff --git a/arch/blackfin/include/asm/irqflags.h b/arch/blackfin/include/asm/irqflags.h
index b4bbb75a9e15..43eb4749de3d 100644
--- a/arch/blackfin/include/asm/irqflags.h
+++ b/arch/blackfin/include/asm/irqflags.h
@@ -18,12 +18,12 @@
18extern unsigned long bfin_irq_flags; 18extern unsigned long bfin_irq_flags;
19#endif 19#endif
20 20
21static inline void bfin_sti(unsigned long flags) 21static inline notrace void bfin_sti(unsigned long flags)
22{ 22{
23 asm volatile("sti %0;" : : "d" (flags)); 23 asm volatile("sti %0;" : : "d" (flags));
24} 24}
25 25
26static inline unsigned long bfin_cli(void) 26static inline notrace unsigned long bfin_cli(void)
27{ 27{
28 unsigned long flags; 28 unsigned long flags;
29 asm volatile("cli %0;" : "=d" (flags)); 29 asm volatile("cli %0;" : "=d" (flags));
@@ -40,22 +40,22 @@ static inline unsigned long bfin_cli(void)
40/* 40/*
41 * Hard, untraced CPU interrupt flag manipulation and access. 41 * Hard, untraced CPU interrupt flag manipulation and access.
42 */ 42 */
43static inline void __hard_local_irq_disable(void) 43static inline notrace void __hard_local_irq_disable(void)
44{ 44{
45 bfin_cli(); 45 bfin_cli();
46} 46}
47 47
48static inline void __hard_local_irq_enable(void) 48static inline notrace void __hard_local_irq_enable(void)
49{ 49{
50 bfin_sti(bfin_irq_flags); 50 bfin_sti(bfin_irq_flags);
51} 51}
52 52
53static inline unsigned long hard_local_save_flags(void) 53static inline notrace unsigned long hard_local_save_flags(void)
54{ 54{
55 return bfin_read_IMASK(); 55 return bfin_read_IMASK();
56} 56}
57 57
58static inline unsigned long __hard_local_irq_save(void) 58static inline notrace unsigned long __hard_local_irq_save(void)
59{ 59{
60 unsigned long flags; 60 unsigned long flags;
61 flags = bfin_cli(); 61 flags = bfin_cli();
@@ -65,18 +65,18 @@ static inline unsigned long __hard_local_irq_save(void)
65 return flags; 65 return flags;
66} 66}
67 67
68static inline int hard_irqs_disabled_flags(unsigned long flags) 68static inline notrace int hard_irqs_disabled_flags(unsigned long flags)
69{ 69{
70 return (flags & ~0x3f) == 0; 70 return (flags & ~0x3f) == 0;
71} 71}
72 72
73static inline int hard_irqs_disabled(void) 73static inline notrace int hard_irqs_disabled(void)
74{ 74{
75 unsigned long flags = hard_local_save_flags(); 75 unsigned long flags = hard_local_save_flags();
76 return hard_irqs_disabled_flags(flags); 76 return hard_irqs_disabled_flags(flags);
77} 77}
78 78
79static inline void __hard_local_irq_restore(unsigned long flags) 79static inline notrace void __hard_local_irq_restore(unsigned long flags)
80{ 80{
81 if (!hard_irqs_disabled_flags(flags)) 81 if (!hard_irqs_disabled_flags(flags))
82 __hard_local_irq_enable(); 82 __hard_local_irq_enable();
@@ -113,31 +113,31 @@ void ipipe_check_context(struct ipipe_domain *ipd);
113/* 113/*
114 * Interrupt pipe interface to linux/irqflags.h. 114 * Interrupt pipe interface to linux/irqflags.h.
115 */ 115 */
116static inline void arch_local_irq_disable(void) 116static inline notrace void arch_local_irq_disable(void)
117{ 117{
118 __check_irqop_context(); 118 __check_irqop_context();
119 __ipipe_stall_root(); 119 __ipipe_stall_root();
120 barrier(); 120 barrier();
121} 121}
122 122
123static inline void arch_local_irq_enable(void) 123static inline notrace void arch_local_irq_enable(void)
124{ 124{
125 barrier(); 125 barrier();
126 __check_irqop_context(); 126 __check_irqop_context();
127 __ipipe_unstall_root(); 127 __ipipe_unstall_root();
128} 128}
129 129
130static inline unsigned long arch_local_save_flags(void) 130static inline notrace unsigned long arch_local_save_flags(void)
131{ 131{
132 return __ipipe_test_root() ? bfin_no_irqs : bfin_irq_flags; 132 return __ipipe_test_root() ? bfin_no_irqs : bfin_irq_flags;
133} 133}
134 134
135static inline int arch_irqs_disabled_flags(unsigned long flags) 135static inline notrace int arch_irqs_disabled_flags(unsigned long flags)
136{ 136{
137 return flags == bfin_no_irqs; 137 return flags == bfin_no_irqs;
138} 138}
139 139
140static inline unsigned long arch_local_irq_save(void) 140static inline notrace unsigned long arch_local_irq_save(void)
141{ 141{
142 unsigned long flags; 142 unsigned long flags;
143 143
@@ -148,13 +148,13 @@ static inline unsigned long arch_local_irq_save(void)
148 return flags; 148 return flags;
149} 149}
150 150
151static inline void arch_local_irq_restore(unsigned long flags) 151static inline notrace void arch_local_irq_restore(unsigned long flags)
152{ 152{
153 __check_irqop_context(); 153 __check_irqop_context();
154 __ipipe_restore_root(flags == bfin_no_irqs); 154 __ipipe_restore_root(flags == bfin_no_irqs);
155} 155}
156 156
157static inline unsigned long arch_mangle_irq_bits(int virt, unsigned long real) 157static inline notrace unsigned long arch_mangle_irq_bits(int virt, unsigned long real)
158{ 158{
159 /* 159 /*
160 * Merge virtual and real interrupt mask bits into a single 160 * Merge virtual and real interrupt mask bits into a single
@@ -163,7 +163,7 @@ static inline unsigned long arch_mangle_irq_bits(int virt, unsigned long real)
163 return (real & ~(1 << 31)) | ((virt != 0) << 31); 163 return (real & ~(1 << 31)) | ((virt != 0) << 31);
164} 164}
165 165
166static inline int arch_demangle_irq_bits(unsigned long *x) 166static inline notrace int arch_demangle_irq_bits(unsigned long *x)
167{ 167{
168 int virt = (*x & (1 << 31)) != 0; 168 int virt = (*x & (1 << 31)) != 0;
169 *x &= ~(1L << 31); 169 *x &= ~(1L << 31);
@@ -174,7 +174,7 @@ static inline int arch_demangle_irq_bits(unsigned long *x)
174 * Interface to various arch routines that may be traced. 174 * Interface to various arch routines that may be traced.
175 */ 175 */
176#ifdef CONFIG_IPIPE_TRACE_IRQSOFF 176#ifdef CONFIG_IPIPE_TRACE_IRQSOFF
177static inline void hard_local_irq_disable(void) 177static inline notrace void hard_local_irq_disable(void)
178{ 178{
179 if (!hard_irqs_disabled()) { 179 if (!hard_irqs_disabled()) {
180 __hard_local_irq_disable(); 180 __hard_local_irq_disable();
@@ -182,7 +182,7 @@ static inline void hard_local_irq_disable(void)
182 } 182 }
183} 183}
184 184
185static inline void hard_local_irq_enable(void) 185static inline notrace void hard_local_irq_enable(void)
186{ 186{
187 if (hard_irqs_disabled()) { 187 if (hard_irqs_disabled()) {
188 ipipe_trace_end(0x80000000); 188 ipipe_trace_end(0x80000000);
@@ -190,7 +190,7 @@ static inline void hard_local_irq_enable(void)
190 } 190 }
191} 191}
192 192
193static inline unsigned long hard_local_irq_save(void) 193static inline notrace unsigned long hard_local_irq_save(void)
194{ 194{
195 unsigned long flags = hard_local_save_flags(); 195 unsigned long flags = hard_local_save_flags();
196 if (!hard_irqs_disabled_flags(flags)) { 196 if (!hard_irqs_disabled_flags(flags)) {
@@ -200,7 +200,7 @@ static inline unsigned long hard_local_irq_save(void)
200 return flags; 200 return flags;
201} 201}
202 202
203static inline void hard_local_irq_restore(unsigned long flags) 203static inline notrace void hard_local_irq_restore(unsigned long flags)
204{ 204{
205 if (!hard_irqs_disabled_flags(flags)) { 205 if (!hard_irqs_disabled_flags(flags)) {
206 ipipe_trace_end(0x80000001); 206 ipipe_trace_end(0x80000001);
diff --git a/arch/blackfin/include/asm/kdebug.h b/arch/blackfin/include/asm/kdebug.h
deleted file mode 100644
index 6ece1b037665..000000000000
--- a/arch/blackfin/include/asm/kdebug.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/kdebug.h>
diff --git a/arch/blackfin/include/asm/kmap_types.h b/arch/blackfin/include/asm/kmap_types.h
deleted file mode 100644
index 3575c64af42a..000000000000
--- a/arch/blackfin/include/asm/kmap_types.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/kmap_types.h>
diff --git a/arch/blackfin/include/asm/local.h b/arch/blackfin/include/asm/local.h
deleted file mode 100644
index c11c530f74d0..000000000000
--- a/arch/blackfin/include/asm/local.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/local.h>
diff --git a/arch/blackfin/include/asm/local64.h b/arch/blackfin/include/asm/local64.h
deleted file mode 100644
index 36c93b5cc239..000000000000
--- a/arch/blackfin/include/asm/local64.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/local64.h>
diff --git a/arch/blackfin/include/asm/mman.h b/arch/blackfin/include/asm/mman.h
deleted file mode 100644
index 8eebf89f5ab1..000000000000
--- a/arch/blackfin/include/asm/mman.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/mman.h>
diff --git a/arch/blackfin/include/asm/module.h b/arch/blackfin/include/asm/module.h
index 4282b169ead9..ed5689b82c9f 100644
--- a/arch/blackfin/include/asm/module.h
+++ b/arch/blackfin/include/asm/module.h
@@ -1,8 +1,8 @@
1/* 1/*
2 * Copyright 2004-2008 Analog Devices Inc. 2 * Copyright 2004-2008 Analog Devices Inc.
3 * 3 *
4 * Licensed under the GPL-2 or later. 4 * Licensed under the GPL-2 or later.
5 */ 5 */
6 6
7#ifndef _ASM_BFIN_MODULE_H 7#ifndef _ASM_BFIN_MODULE_H
8#define _ASM_BFIN_MODULE_H 8#define _ASM_BFIN_MODULE_H
diff --git a/arch/blackfin/include/asm/msgbuf.h b/arch/blackfin/include/asm/msgbuf.h
deleted file mode 100644
index 809134c644a6..000000000000
--- a/arch/blackfin/include/asm/msgbuf.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/msgbuf.h>
diff --git a/arch/blackfin/include/asm/mutex.h b/arch/blackfin/include/asm/mutex.h
index f726e3a80ad0..ff6101aa2c71 100644
--- a/arch/blackfin/include/asm/mutex.h
+++ b/arch/blackfin/include/asm/mutex.h
@@ -1,76 +1 @@
1/* #include <asm-generic/mutex-dec.h>
2 * Pull in the generic implementation for the mutex fastpath.
3 *
4 * TODO: implement optimized primitives instead, or leave the generic
5 * implementation in place, or pick the atomic_xchg() based generic
6 * implementation. (see asm-generic/mutex-xchg.h for details)
7 *
8 * Copyright 2006-2009 Analog Devices Inc.
9 *
10 * Licensed under the GPL-2 or later.
11 */
12
13#ifndef _ASM_MUTEX_H
14#define _ASM_MUTEX_H
15
16#ifndef CONFIG_SMP
17#include <asm-generic/mutex.h>
18#else
19
20static inline void
21__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
22{
23 if (unlikely(atomic_dec_return(count) < 0))
24 fail_fn(count);
25 else
26 smp_mb();
27}
28
29static inline int
30__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
31{
32 if (unlikely(atomic_dec_return(count) < 0))
33 return fail_fn(count);
34 else {
35 smp_mb();
36 return 0;
37 }
38}
39
40static inline void
41__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
42{
43 smp_mb();
44 if (unlikely(atomic_inc_return(count) <= 0))
45 fail_fn(count);
46}
47
48#define __mutex_slowpath_needs_to_unlock() 1
49
50static inline int
51__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
52{
53 /*
54 * We have two variants here. The cmpxchg based one is the best one
55 * because it never induce a false contention state. It is included
56 * here because architectures using the inc/dec algorithms over the
57 * xchg ones are much more likely to support cmpxchg natively.
58 *
59 * If not we fall back to the spinlock based variant - that is
60 * just as efficient (and simpler) as a 'destructive' probing of
61 * the mutex state would be.
62 */
63#ifdef __HAVE_ARCH_CMPXCHG
64 if (likely(atomic_cmpxchg(count, 1, 0) == 1)) {
65 smp_mb();
66 return 1;
67 }
68 return 0;
69#else
70 return fail_fn(count);
71#endif
72}
73
74#endif
75
76#endif
diff --git a/arch/blackfin/include/asm/page.h b/arch/blackfin/include/asm/page.h
index d0ce975bcd48..7202404966f6 100644
--- a/arch/blackfin/include/asm/page.h
+++ b/arch/blackfin/include/asm/page.h
@@ -1,8 +1,8 @@
1/* 1/*
2 * Copyright 2004-2009 Analog Devices Inc. 2 * Copyright 2004-2009 Analog Devices Inc.
3 * 3 *
4 * Licensed under the GPL-2 or later. 4 * Licensed under the GPL-2 or later.
5 */ 5 */
6 6
7#ifndef _BLACKFIN_PAGE_H 7#ifndef _BLACKFIN_PAGE_H
8#define _BLACKFIN_PAGE_H 8#define _BLACKFIN_PAGE_H
diff --git a/arch/blackfin/include/asm/param.h b/arch/blackfin/include/asm/param.h
deleted file mode 100644
index 965d45427975..000000000000
--- a/arch/blackfin/include/asm/param.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/param.h>
diff --git a/arch/blackfin/include/asm/pda.h b/arch/blackfin/include/asm/pda.h
index d49bb261d9b7..28c2498c9c98 100644
--- a/arch/blackfin/include/asm/pda.h
+++ b/arch/blackfin/include/asm/pda.h
@@ -54,6 +54,16 @@ struct blackfin_pda { /* Per-processor Data Area */
54#endif 54#endif
55}; 55};
56 56
57struct blackfin_initial_pda {
58 void *retx;
59#ifdef CONFIG_DEBUG_DOUBLEFAULT
60 void *dcplb_doublefault_addr;
61 void *icplb_doublefault_addr;
62 void *retx_doublefault;
63 unsigned seqstat_doublefault;
64#endif
65};
66
57extern struct blackfin_pda cpu_pda[]; 67extern struct blackfin_pda cpu_pda[];
58 68
59#endif /* __ASSEMBLY__ */ 69#endif /* __ASSEMBLY__ */
diff --git a/arch/blackfin/include/asm/percpu.h b/arch/blackfin/include/asm/percpu.h
deleted file mode 100644
index 06a959d67234..000000000000
--- a/arch/blackfin/include/asm/percpu.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/percpu.h>
diff --git a/arch/blackfin/include/asm/pgalloc.h b/arch/blackfin/include/asm/pgalloc.h
deleted file mode 100644
index f261cb7dda06..000000000000
--- a/arch/blackfin/include/asm/pgalloc.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/pgalloc.h>
diff --git a/arch/blackfin/include/asm/resource.h b/arch/blackfin/include/asm/resource.h
deleted file mode 100644
index 04bc4db8921b..000000000000
--- a/arch/blackfin/include/asm/resource.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/resource.h>
diff --git a/arch/blackfin/include/asm/scatterlist.h b/arch/blackfin/include/asm/scatterlist.h
deleted file mode 100644
index d177a1588958..000000000000
--- a/arch/blackfin/include/asm/scatterlist.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _BLACKFIN_SCATTERLIST_H
2#define _BLACKFIN_SCATTERLIST_H
3
4#include <asm-generic/scatterlist.h>
5
6#endif /* !(_BLACKFIN_SCATTERLIST_H) */
diff --git a/arch/blackfin/include/asm/sections.h b/arch/blackfin/include/asm/sections.h
index 14a3e66d9167..fbd408475725 100644
--- a/arch/blackfin/include/asm/sections.h
+++ b/arch/blackfin/include/asm/sections.h
@@ -1,8 +1,8 @@
1/* 1/*
2 * Copyright 2004-2009 Analog Devices Inc. 2 * Copyright 2004-2009 Analog Devices Inc.
3 * 3 *
4 * Licensed under the GPL-2 or later. 4 * Licensed under the GPL-2 or later.
5 */ 5 */
6 6
7#ifndef _BLACKFIN_SECTIONS_H 7#ifndef _BLACKFIN_SECTIONS_H
8#define _BLACKFIN_SECTIONS_H 8#define _BLACKFIN_SECTIONS_H
diff --git a/arch/blackfin/include/asm/sembuf.h b/arch/blackfin/include/asm/sembuf.h
deleted file mode 100644
index 7673b83cfef7..000000000000
--- a/arch/blackfin/include/asm/sembuf.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/sembuf.h>
diff --git a/arch/blackfin/include/asm/serial.h b/arch/blackfin/include/asm/serial.h
deleted file mode 100644
index a0cb0caff152..000000000000
--- a/arch/blackfin/include/asm/serial.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/serial.h>
diff --git a/arch/blackfin/include/asm/setup.h b/arch/blackfin/include/asm/setup.h
deleted file mode 100644
index 552df83f1a49..000000000000
--- a/arch/blackfin/include/asm/setup.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/setup.h>
diff --git a/arch/blackfin/include/asm/shmbuf.h b/arch/blackfin/include/asm/shmbuf.h
deleted file mode 100644
index 83c05fc2de38..000000000000
--- a/arch/blackfin/include/asm/shmbuf.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/shmbuf.h>
diff --git a/arch/blackfin/include/asm/shmparam.h b/arch/blackfin/include/asm/shmparam.h
deleted file mode 100644
index 93f30deb95d0..000000000000
--- a/arch/blackfin/include/asm/shmparam.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/shmparam.h>
diff --git a/arch/blackfin/include/asm/sigcontext.h b/arch/blackfin/include/asm/sigcontext.h
index ce4081a4d815..906bdc1f5fda 100644
--- a/arch/blackfin/include/asm/sigcontext.h
+++ b/arch/blackfin/include/asm/sigcontext.h
@@ -1,8 +1,8 @@
1/* 1/*
2 * Copyright 2004-2008 Analog Devices Inc. 2 * Copyright 2004-2008 Analog Devices Inc.
3 * 3 *
4 * Licensed under the GPL-2 or later. 4 * Licensed under the GPL-2 or later.
5 */ 5 */
6 6
7#ifndef _ASM_BLACKFIN_SIGCONTEXT_H 7#ifndef _ASM_BLACKFIN_SIGCONTEXT_H
8#define _ASM_BLACKFIN_SIGCONTEXT_H 8#define _ASM_BLACKFIN_SIGCONTEXT_H
diff --git a/arch/blackfin/include/asm/socket.h b/arch/blackfin/include/asm/socket.h
deleted file mode 100644
index 6b71384b9d8b..000000000000
--- a/arch/blackfin/include/asm/socket.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/socket.h>
diff --git a/arch/blackfin/include/asm/sockios.h b/arch/blackfin/include/asm/sockios.h
deleted file mode 100644
index def6d4746ee7..000000000000
--- a/arch/blackfin/include/asm/sockios.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/sockios.h>
diff --git a/arch/blackfin/include/asm/spinlock.h b/arch/blackfin/include/asm/spinlock.h
index 1f286e71c21f..2336093fca23 100644
--- a/arch/blackfin/include/asm/spinlock.h
+++ b/arch/blackfin/include/asm/spinlock.h
@@ -1,8 +1,8 @@
1/* 1/*
2 * Copyright 2004-2009 Analog Devices Inc. 2 * Copyright 2004-2009 Analog Devices Inc.
3 * 3 *
4 * Licensed under the GPL-2 or later. 4 * Licensed under the GPL-2 or later.
5 */ 5 */
6 6
7#ifndef __BFIN_SPINLOCK_H 7#ifndef __BFIN_SPINLOCK_H
8#define __BFIN_SPINLOCK_H 8#define __BFIN_SPINLOCK_H
diff --git a/arch/blackfin/include/asm/statfs.h b/arch/blackfin/include/asm/statfs.h
deleted file mode 100644
index 0b91fe198c20..000000000000
--- a/arch/blackfin/include/asm/statfs.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/statfs.h>
diff --git a/arch/blackfin/include/asm/termbits.h b/arch/blackfin/include/asm/termbits.h
deleted file mode 100644
index 3935b106de79..000000000000
--- a/arch/blackfin/include/asm/termbits.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/termbits.h>
diff --git a/arch/blackfin/include/asm/termios.h b/arch/blackfin/include/asm/termios.h
deleted file mode 100644
index 280d78a9d966..000000000000
--- a/arch/blackfin/include/asm/termios.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/termios.h>
diff --git a/arch/blackfin/include/asm/topology.h b/arch/blackfin/include/asm/topology.h
deleted file mode 100644
index 5428f333a02c..000000000000
--- a/arch/blackfin/include/asm/topology.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/topology.h>
diff --git a/arch/blackfin/include/asm/types.h b/arch/blackfin/include/asm/types.h
deleted file mode 100644
index b9e79bc580dd..000000000000
--- a/arch/blackfin/include/asm/types.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/types.h>
diff --git a/arch/blackfin/include/asm/ucontext.h b/arch/blackfin/include/asm/ucontext.h
deleted file mode 100644
index 9bc07b9f30fb..000000000000
--- a/arch/blackfin/include/asm/ucontext.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/ucontext.h>
diff --git a/arch/blackfin/include/asm/unaligned.h b/arch/blackfin/include/asm/unaligned.h
deleted file mode 100644
index 6cecbbb2111f..000000000000
--- a/arch/blackfin/include/asm/unaligned.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/unaligned.h>
diff --git a/arch/blackfin/include/asm/user.h b/arch/blackfin/include/asm/user.h
deleted file mode 100644
index 4792a60831e4..000000000000
--- a/arch/blackfin/include/asm/user.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/user.h>
diff --git a/arch/blackfin/include/asm/xor.h b/arch/blackfin/include/asm/xor.h
deleted file mode 100644
index c82eb12a5b18..000000000000
--- a/arch/blackfin/include/asm/xor.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/xor.h>
diff --git a/arch/blackfin/kernel/Makefile b/arch/blackfin/kernel/Makefile
index d550b24d9e9b..b7bdc42fe1a3 100644
--- a/arch/blackfin/kernel/Makefile
+++ b/arch/blackfin/kernel/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_FUNCTION_TRACER) += ftrace-entry.o
21obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o 21obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
22CFLAGS_REMOVE_ftrace.o = -pg 22CFLAGS_REMOVE_ftrace.o = -pg
23 23
24obj-$(CONFIG_HAVE_PWM) += pwm.o
24obj-$(CONFIG_IPIPE) += ipipe.o 25obj-$(CONFIG_IPIPE) += ipipe.o
25obj-$(CONFIG_BFIN_GPTIMERS) += gptimers.o 26obj-$(CONFIG_BFIN_GPTIMERS) += gptimers.o
26obj-$(CONFIG_CPLB_INFO) += cplbinfo.o 27obj-$(CONFIG_CPLB_INFO) += cplbinfo.o
diff --git a/arch/blackfin/kernel/asm-offsets.c b/arch/blackfin/kernel/asm-offsets.c
index bd32c09b9349..17e35465a416 100644
--- a/arch/blackfin/kernel/asm-offsets.c
+++ b/arch/blackfin/kernel/asm-offsets.c
@@ -138,6 +138,16 @@ int main(void)
138 DEFINE(PDA_DF_SEQSTAT, offsetof(struct blackfin_pda, seqstat_doublefault)); 138 DEFINE(PDA_DF_SEQSTAT, offsetof(struct blackfin_pda, seqstat_doublefault));
139 DEFINE(PDA_DF_RETX, offsetof(struct blackfin_pda, retx_doublefault)); 139 DEFINE(PDA_DF_RETX, offsetof(struct blackfin_pda, retx_doublefault));
140#endif 140#endif
141
142 /* PDA initial management */
143 DEFINE(PDA_INIT_RETX, offsetof(struct blackfin_initial_pda, retx));
144#ifdef CONFIG_DEBUG_DOUBLEFAULT
145 DEFINE(PDA_INIT_DF_DCPLB, offsetof(struct blackfin_initial_pda, dcplb_doublefault_addr));
146 DEFINE(PDA_INIT_DF_ICPLB, offsetof(struct blackfin_initial_pda, icplb_doublefault_addr));
147 DEFINE(PDA_INIT_DF_SEQSTAT, offsetof(struct blackfin_initial_pda, seqstat_doublefault));
148 DEFINE(PDA_INIT_DF_RETX, offsetof(struct blackfin_initial_pda, retx_doublefault));
149#endif
150
141#ifdef CONFIG_SMP 151#ifdef CONFIG_SMP
142 /* Inter-core lock (in L2 SRAM) */ 152 /* Inter-core lock (in L2 SRAM) */
143 DEFINE(SIZEOF_CORELOCK, sizeof(struct corelock_slot)); 153 DEFINE(SIZEOF_CORELOCK, sizeof(struct corelock_slot));
diff --git a/arch/blackfin/kernel/bfin_gpio.c b/arch/blackfin/kernel/bfin_gpio.c
index bcf8cf6fe412..02796b88443d 100644
--- a/arch/blackfin/kernel/bfin_gpio.c
+++ b/arch/blackfin/kernel/bfin_gpio.c
@@ -118,6 +118,9 @@ static struct str_ident {
118 118
119#if defined(CONFIG_PM) 119#if defined(CONFIG_PM)
120static struct gpio_port_s gpio_bank_saved[GPIO_BANK_NUM]; 120static struct gpio_port_s gpio_bank_saved[GPIO_BANK_NUM];
121# ifdef BF538_FAMILY
122static unsigned short port_fer_saved[3];
123# endif
121#endif 124#endif
122 125
123static void gpio_error(unsigned gpio) 126static void gpio_error(unsigned gpio)
@@ -604,6 +607,11 @@ void bfin_gpio_pm_hibernate_suspend(void)
604{ 607{
605 int i, bank; 608 int i, bank;
606 609
610#ifdef BF538_FAMILY
611 for (i = 0; i < ARRAY_SIZE(port_fer_saved); ++i)
612 port_fer_saved[i] = *port_fer[i];
613#endif
614
607 for (i = 0; i < MAX_BLACKFIN_GPIOS; i += GPIO_BANKSIZE) { 615 for (i = 0; i < MAX_BLACKFIN_GPIOS; i += GPIO_BANKSIZE) {
608 bank = gpio_bank(i); 616 bank = gpio_bank(i);
609 617
@@ -625,6 +633,10 @@ void bfin_gpio_pm_hibernate_suspend(void)
625 gpio_bank_saved[bank].maska = gpio_array[bank]->maska; 633 gpio_bank_saved[bank].maska = gpio_array[bank]->maska;
626 } 634 }
627 635
636#ifdef BFIN_SPECIAL_GPIO_BANKS
637 bfin_special_gpio_pm_hibernate_suspend();
638#endif
639
628 AWA_DUMMY_READ(maska); 640 AWA_DUMMY_READ(maska);
629} 641}
630 642
@@ -632,6 +644,11 @@ void bfin_gpio_pm_hibernate_restore(void)
632{ 644{
633 int i, bank; 645 int i, bank;
634 646
647#ifdef BF538_FAMILY
648 for (i = 0; i < ARRAY_SIZE(port_fer_saved); ++i)
649 *port_fer[i] = port_fer_saved[i];
650#endif
651
635 for (i = 0; i < MAX_BLACKFIN_GPIOS; i += GPIO_BANKSIZE) { 652 for (i = 0; i < MAX_BLACKFIN_GPIOS; i += GPIO_BANKSIZE) {
636 bank = gpio_bank(i); 653 bank = gpio_bank(i);
637 654
@@ -653,6 +670,11 @@ void bfin_gpio_pm_hibernate_restore(void)
653 gpio_array[bank]->both = gpio_bank_saved[bank].both; 670 gpio_array[bank]->both = gpio_bank_saved[bank].both;
654 gpio_array[bank]->maska = gpio_bank_saved[bank].maska; 671 gpio_array[bank]->maska = gpio_bank_saved[bank].maska;
655 } 672 }
673
674#ifdef BFIN_SPECIAL_GPIO_BANKS
675 bfin_special_gpio_pm_hibernate_restore();
676#endif
677
656 AWA_DUMMY_READ(maska); 678 AWA_DUMMY_READ(maska);
657} 679}
658 680
@@ -691,9 +713,9 @@ void bfin_gpio_pm_hibernate_restore(void)
691 gpio_array[bank]->port_mux = gpio_bank_saved[bank].mux; 713 gpio_array[bank]->port_mux = gpio_bank_saved[bank].mux;
692 gpio_array[bank]->port_fer = gpio_bank_saved[bank].fer; 714 gpio_array[bank]->port_fer = gpio_bank_saved[bank].fer;
693 gpio_array[bank]->inen = gpio_bank_saved[bank].inen; 715 gpio_array[bank]->inen = gpio_bank_saved[bank].inen;
694 gpio_array[bank]->dir_set = gpio_bank_saved[bank].dir;
695 gpio_array[bank]->data_set = gpio_bank_saved[bank].data 716 gpio_array[bank]->data_set = gpio_bank_saved[bank].data
696 | gpio_bank_saved[bank].dir; 717 & gpio_bank_saved[bank].dir;
718 gpio_array[bank]->dir_set = gpio_bank_saved[bank].dir;
697 } 719 }
698} 720}
699#endif 721#endif
diff --git a/arch/blackfin/kernel/debug-mmrs.c b/arch/blackfin/kernel/debug-mmrs.c
index fce4807ceef9..92f664826281 100644
--- a/arch/blackfin/kernel/debug-mmrs.c
+++ b/arch/blackfin/kernel/debug-mmrs.c
@@ -27,7 +27,7 @@
27#define PORT_MUX BFIN_PORT_MUX 27#define PORT_MUX BFIN_PORT_MUX
28#endif 28#endif
29 29
30#define _d(name, bits, addr, perms) debugfs_create_x##bits(name, perms, parent, (u##bits *)addr) 30#define _d(name, bits, addr, perms) debugfs_create_x##bits(name, perms, parent, (u##bits *)(addr))
31#define d(name, bits, addr) _d(name, bits, addr, S_IRUSR|S_IWUSR) 31#define d(name, bits, addr) _d(name, bits, addr, S_IRUSR|S_IWUSR)
32#define d_RO(name, bits, addr) _d(name, bits, addr, S_IRUSR) 32#define d_RO(name, bits, addr) _d(name, bits, addr, S_IRUSR)
33#define d_WO(name, bits, addr) _d(name, bits, addr, S_IWUSR) 33#define d_WO(name, bits, addr) _d(name, bits, addr, S_IWUSR)
@@ -223,7 +223,8 @@ bfin_debug_mmrs_dma(struct dentry *parent, unsigned long base, int num, char mdm
223 __DMA(CURR_DESC_PTR, curr_desc_ptr); 223 __DMA(CURR_DESC_PTR, curr_desc_ptr);
224 __DMA(CURR_ADDR, curr_addr); 224 __DMA(CURR_ADDR, curr_addr);
225 __DMA(IRQ_STATUS, irq_status); 225 __DMA(IRQ_STATUS, irq_status);
226 __DMA(PERIPHERAL_MAP, peripheral_map); 226 if (strcmp(pfx, "IMDMA") != 0)
227 __DMA(PERIPHERAL_MAP, peripheral_map);
227 __DMA(CURR_X_COUNT, curr_x_count); 228 __DMA(CURR_X_COUNT, curr_x_count);
228 __DMA(CURR_Y_COUNT, curr_y_count); 229 __DMA(CURR_Y_COUNT, curr_y_count);
229} 230}
@@ -277,6 +278,32 @@ bfin_debug_mmrs_gptimer(struct dentry *parent, unsigned long base, int num)
277} 278}
278#define GPTIMER(num) bfin_debug_mmrs_gptimer(parent, TIMER##num##_CONFIG, num) 279#define GPTIMER(num) bfin_debug_mmrs_gptimer(parent, TIMER##num##_CONFIG, num)
279 280
281#define GPTIMER_GROUP_OFF(mmr) REGS_OFF(gptimer_group, mmr)
282#define __GPTIMER_GROUP(uname, lname) __REGS(gptimer_group, #uname, lname)
283static void __init __maybe_unused
284bfin_debug_mmrs_gptimer_group(struct dentry *parent, unsigned long base, int num)
285{
286 char buf[32], *_buf;
287
288 if (num == -1) {
289 _buf = buf + sprintf(buf, "TIMER_");
290 __GPTIMER_GROUP(ENABLE, enable);
291 __GPTIMER_GROUP(DISABLE, disable);
292 __GPTIMER_GROUP(STATUS, status);
293 } else {
294 /* These MMRs are a bit odd as the group # is a suffix */
295 _buf = buf + sprintf(buf, "TIMER_ENABLE%i", num);
296 d(buf, 16, base + GPTIMER_GROUP_OFF(enable));
297
298 _buf = buf + sprintf(buf, "TIMER_DISABLE%i", num);
299 d(buf, 16, base + GPTIMER_GROUP_OFF(disable));
300
301 _buf = buf + sprintf(buf, "TIMER_STATUS%i", num);
302 d(buf, 32, base + GPTIMER_GROUP_OFF(status));
303 }
304}
305#define GPTIMER_GROUP(mmr, num) bfin_debug_mmrs_gptimer_group(parent, mmr, num)
306
280/* 307/*
281 * Handshake MDMA 308 * Handshake MDMA
282 */ 309 */
@@ -296,6 +323,29 @@ bfin_debug_mmrs_hmdma(struct dentry *parent, unsigned long base, int num)
296#define HMDMA(num) bfin_debug_mmrs_hmdma(parent, HMDMA##num##_CONTROL, num) 323#define HMDMA(num) bfin_debug_mmrs_hmdma(parent, HMDMA##num##_CONTROL, num)
297 324
298/* 325/*
326 * Peripheral Interrupts (PINT/GPIO)
327 */
328#ifdef PINT0_MASK_SET
329#define __PINT(uname, lname) __REGS(pint, #uname, lname)
330static void __init __maybe_unused
331bfin_debug_mmrs_pint(struct dentry *parent, unsigned long base, int num)
332{
333 char buf[32], *_buf = REGS_STR_PFX(buf, PINT, num);
334 __PINT(MASK_SET, mask_set);
335 __PINT(MASK_CLEAR, mask_clear);
336 __PINT(REQUEST, request);
337 __PINT(ASSIGN, assign);
338 __PINT(EDGE_SET, edge_set);
339 __PINT(EDGE_CLEAR, edge_clear);
340 __PINT(INVERT_SET, invert_set);
341 __PINT(INVERT_CLEAR, invert_clear);
342 __PINT(PINSTATE, pinstate);
343 __PINT(LATCH, latch);
344}
345#define PINT(num) bfin_debug_mmrs_pint(parent, PINT##num##_MASK_SET, num)
346#endif
347
348/*
299 * Port/GPIO 349 * Port/GPIO
300 */ 350 */
301#define bfin_gpio_regs gpio_port_t 351#define bfin_gpio_regs gpio_port_t
@@ -747,7 +797,7 @@ static int __init bfin_debug_mmrs_init(void)
747#endif 797#endif
748 798
749 parent = debugfs_create_dir("dmac", top); 799 parent = debugfs_create_dir("dmac", top);
750#ifdef DMA_TC_CNT 800#ifdef DMAC_TC_CNT
751 D16(DMAC_TC_CNT); 801 D16(DMAC_TC_CNT);
752 D16(DMAC_TC_PER); 802 D16(DMAC_TC_PER);
753#endif 803#endif
@@ -1005,29 +1055,19 @@ static int __init bfin_debug_mmrs_init(void)
1005#endif 1055#endif
1006 1056
1007 parent = debugfs_create_dir("gptimer", top); 1057 parent = debugfs_create_dir("gptimer", top);
1008#ifdef TIMER_DISABLE 1058#ifdef TIMER_ENABLE
1009 D16(TIMER_DISABLE); 1059 GPTIMER_GROUP(TIMER_ENABLE, -1);
1010 D16(TIMER_ENABLE);
1011 D32(TIMER_STATUS);
1012#endif 1060#endif
1013#ifdef TIMER_DISABLE0 1061#ifdef TIMER_ENABLE0
1014 D16(TIMER_DISABLE0); 1062 GPTIMER_GROUP(TIMER_ENABLE0, 0);
1015 D16(TIMER_ENABLE0);
1016 D32(TIMER_STATUS0);
1017#endif 1063#endif
1018#ifdef TIMER_DISABLE1 1064#ifdef TIMER_ENABLE1
1019 D16(TIMER_DISABLE1); 1065 GPTIMER_GROUP(TIMER_ENABLE1, 1);
1020 D16(TIMER_ENABLE1);
1021 D32(TIMER_STATUS1);
1022#endif 1066#endif
1023 /* XXX: Should convert BF561 MMR names */ 1067 /* XXX: Should convert BF561 MMR names */
1024#ifdef TMRS4_DISABLE 1068#ifdef TMRS4_DISABLE
1025 D16(TMRS4_DISABLE); 1069 GPTIMER_GROUP(TMRS4_ENABLE, 0);
1026 D16(TMRS4_ENABLE); 1070 GPTIMER_GROUP(TMRS8_ENABLE, 1);
1027 D32(TMRS4_STATUS);
1028 D16(TMRS8_DISABLE);
1029 D16(TMRS8_ENABLE);
1030 D32(TMRS8_STATUS);
1031#endif 1071#endif
1032 GPTIMER(0); 1072 GPTIMER(0);
1033 GPTIMER(1); 1073 GPTIMER(1);
@@ -1253,6 +1293,14 @@ static int __init bfin_debug_mmrs_init(void)
1253 D32(OTP_DATA3); 1293 D32(OTP_DATA3);
1254#endif 1294#endif
1255 1295
1296#ifdef PINT0_MASK_SET
1297 parent = debugfs_create_dir("pint", top);
1298 PINT(0);
1299 PINT(1);
1300 PINT(2);
1301 PINT(3);
1302#endif
1303
1256#ifdef PIXC_CTL 1304#ifdef PIXC_CTL
1257 parent = debugfs_create_dir("pixc", top); 1305 parent = debugfs_create_dir("pixc", top);
1258 D16(PIXC_CTL); 1306 D16(PIXC_CTL);
@@ -1816,7 +1864,6 @@ static int __init bfin_debug_mmrs_init(void)
1816 { 1864 {
1817 int num; 1865 int num;
1818 unsigned long base; 1866 unsigned long base;
1819 char *_buf, buf[32];
1820 1867
1821 base = PORTA_FER; 1868 base = PORTA_FER;
1822 for (num = 0; num < 10; ++num) { 1869 for (num = 0; num < 10; ++num) {
@@ -1824,24 +1871,6 @@ static int __init bfin_debug_mmrs_init(void)
1824 base += sizeof(struct bfin_gpio_regs); 1871 base += sizeof(struct bfin_gpio_regs);
1825 } 1872 }
1826 1873
1827#define __PINT(uname, lname) __REGS(pint, #uname, lname)
1828 parent = debugfs_create_dir("pint", top);
1829 base = PINT0_MASK_SET;
1830 for (num = 0; num < 4; ++num) {
1831 _buf = REGS_STR_PFX(buf, PINT, num);
1832 __PINT(MASK_SET, mask_set);
1833 __PINT(MASK_CLEAR, mask_clear);
1834 __PINT(IRQ, irq);
1835 __PINT(ASSIGN, assign);
1836 __PINT(EDGE_SET, edge_set);
1837 __PINT(EDGE_CLEAR, edge_clear);
1838 __PINT(INVERT_SET, invert_set);
1839 __PINT(INVERT_CLEAR, invert_clear);
1840 __PINT(PINSTATE, pinstate);
1841 __PINT(LATCH, latch);
1842 base += sizeof(struct bfin_pint_regs);
1843 }
1844
1845 } 1874 }
1846#endif /* BF54x */ 1875#endif /* BF54x */
1847 1876
diff --git a/arch/blackfin/kernel/gptimers.c b/arch/blackfin/kernel/gptimers.c
index 8b81dc04488a..06459f4bf43a 100644
--- a/arch/blackfin/kernel/gptimers.c
+++ b/arch/blackfin/kernel/gptimers.c
@@ -25,49 +25,33 @@
25 25
26#define BFIN_TIMER_NUM_GROUP (BFIN_TIMER_OCTET(MAX_BLACKFIN_GPTIMERS - 1) + 1) 26#define BFIN_TIMER_NUM_GROUP (BFIN_TIMER_OCTET(MAX_BLACKFIN_GPTIMERS - 1) + 1)
27 27
28typedef struct { 28static struct bfin_gptimer_regs * const timer_regs[MAX_BLACKFIN_GPTIMERS] =
29 uint16_t config;
30 uint16_t __pad;
31 uint32_t counter;
32 uint32_t period;
33 uint32_t width;
34} GPTIMER_timer_regs;
35
36typedef struct {
37 uint16_t enable;
38 uint16_t __pad0;
39 uint16_t disable;
40 uint16_t __pad1;
41 uint32_t status;
42} GPTIMER_group_regs;
43
44static volatile GPTIMER_timer_regs *const timer_regs[MAX_BLACKFIN_GPTIMERS] =
45{ 29{
46 (GPTIMER_timer_regs *)TIMER0_CONFIG, 30 (void *)TIMER0_CONFIG,
47 (GPTIMER_timer_regs *)TIMER1_CONFIG, 31 (void *)TIMER1_CONFIG,
48 (GPTIMER_timer_regs *)TIMER2_CONFIG, 32 (void *)TIMER2_CONFIG,
49#if (MAX_BLACKFIN_GPTIMERS > 3) 33#if (MAX_BLACKFIN_GPTIMERS > 3)
50 (GPTIMER_timer_regs *)TIMER3_CONFIG, 34 (void *)TIMER3_CONFIG,
51 (GPTIMER_timer_regs *)TIMER4_CONFIG, 35 (void *)TIMER4_CONFIG,
52 (GPTIMER_timer_regs *)TIMER5_CONFIG, 36 (void *)TIMER5_CONFIG,
53 (GPTIMER_timer_regs *)TIMER6_CONFIG, 37 (void *)TIMER6_CONFIG,
54 (GPTIMER_timer_regs *)TIMER7_CONFIG, 38 (void *)TIMER7_CONFIG,
55# if (MAX_BLACKFIN_GPTIMERS > 8) 39# if (MAX_BLACKFIN_GPTIMERS > 8)
56 (GPTIMER_timer_regs *)TIMER8_CONFIG, 40 (void *)TIMER8_CONFIG,
57 (GPTIMER_timer_regs *)TIMER9_CONFIG, 41 (void *)TIMER9_CONFIG,
58 (GPTIMER_timer_regs *)TIMER10_CONFIG, 42 (void *)TIMER10_CONFIG,
59# if (MAX_BLACKFIN_GPTIMERS > 11) 43# if (MAX_BLACKFIN_GPTIMERS > 11)
60 (GPTIMER_timer_regs *)TIMER11_CONFIG, 44 (void *)TIMER11_CONFIG,
61# endif 45# endif
62# endif 46# endif
63#endif 47#endif
64}; 48};
65 49
66static volatile GPTIMER_group_regs *const group_regs[BFIN_TIMER_NUM_GROUP] = 50static struct bfin_gptimer_group_regs * const group_regs[BFIN_TIMER_NUM_GROUP] =
67{ 51{
68 (GPTIMER_group_regs *)TIMER0_GROUP_REG, 52 (void *)TIMER0_GROUP_REG,
69#if (MAX_BLACKFIN_GPTIMERS > 8) 53#if (MAX_BLACKFIN_GPTIMERS > 8)
70 (GPTIMER_group_regs *)TIMER8_GROUP_REG, 54 (void *)TIMER8_GROUP_REG,
71#endif 55#endif
72}; 56};
73 57
@@ -140,7 +124,7 @@ static uint32_t const timil_mask[MAX_BLACKFIN_GPTIMERS] =
140void set_gptimer_pwidth(unsigned int timer_id, uint32_t value) 124void set_gptimer_pwidth(unsigned int timer_id, uint32_t value)
141{ 125{
142 tassert(timer_id < MAX_BLACKFIN_GPTIMERS); 126 tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
143 timer_regs[timer_id]->width = value; 127 bfin_write(&timer_regs[timer_id]->width, value);
144 SSYNC(); 128 SSYNC();
145} 129}
146EXPORT_SYMBOL(set_gptimer_pwidth); 130EXPORT_SYMBOL(set_gptimer_pwidth);
@@ -148,14 +132,14 @@ EXPORT_SYMBOL(set_gptimer_pwidth);
148uint32_t get_gptimer_pwidth(unsigned int timer_id) 132uint32_t get_gptimer_pwidth(unsigned int timer_id)
149{ 133{
150 tassert(timer_id < MAX_BLACKFIN_GPTIMERS); 134 tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
151 return timer_regs[timer_id]->width; 135 return bfin_read(&timer_regs[timer_id]->width);
152} 136}
153EXPORT_SYMBOL(get_gptimer_pwidth); 137EXPORT_SYMBOL(get_gptimer_pwidth);
154 138
155void set_gptimer_period(unsigned int timer_id, uint32_t period) 139void set_gptimer_period(unsigned int timer_id, uint32_t period)
156{ 140{
157 tassert(timer_id < MAX_BLACKFIN_GPTIMERS); 141 tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
158 timer_regs[timer_id]->period = period; 142 bfin_write(&timer_regs[timer_id]->period, period);
159 SSYNC(); 143 SSYNC();
160} 144}
161EXPORT_SYMBOL(set_gptimer_period); 145EXPORT_SYMBOL(set_gptimer_period);
@@ -163,71 +147,76 @@ EXPORT_SYMBOL(set_gptimer_period);
163uint32_t get_gptimer_period(unsigned int timer_id) 147uint32_t get_gptimer_period(unsigned int timer_id)
164{ 148{
165 tassert(timer_id < MAX_BLACKFIN_GPTIMERS); 149 tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
166 return timer_regs[timer_id]->period; 150 return bfin_read(&timer_regs[timer_id]->period);
167} 151}
168EXPORT_SYMBOL(get_gptimer_period); 152EXPORT_SYMBOL(get_gptimer_period);
169 153
170uint32_t get_gptimer_count(unsigned int timer_id) 154uint32_t get_gptimer_count(unsigned int timer_id)
171{ 155{
172 tassert(timer_id < MAX_BLACKFIN_GPTIMERS); 156 tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
173 return timer_regs[timer_id]->counter; 157 return bfin_read(&timer_regs[timer_id]->counter);
174} 158}
175EXPORT_SYMBOL(get_gptimer_count); 159EXPORT_SYMBOL(get_gptimer_count);
176 160
177uint32_t get_gptimer_status(unsigned int group) 161uint32_t get_gptimer_status(unsigned int group)
178{ 162{
179 tassert(group < BFIN_TIMER_NUM_GROUP); 163 tassert(group < BFIN_TIMER_NUM_GROUP);
180 return group_regs[group]->status; 164 return bfin_read(&group_regs[group]->status);
181} 165}
182EXPORT_SYMBOL(get_gptimer_status); 166EXPORT_SYMBOL(get_gptimer_status);
183 167
184void set_gptimer_status(unsigned int group, uint32_t value) 168void set_gptimer_status(unsigned int group, uint32_t value)
185{ 169{
186 tassert(group < BFIN_TIMER_NUM_GROUP); 170 tassert(group < BFIN_TIMER_NUM_GROUP);
187 group_regs[group]->status = value; 171 bfin_write(&group_regs[group]->status, value);
188 SSYNC(); 172 SSYNC();
189} 173}
190EXPORT_SYMBOL(set_gptimer_status); 174EXPORT_SYMBOL(set_gptimer_status);
191 175
176static uint32_t read_gptimer_status(unsigned int timer_id)
177{
178 return bfin_read(&group_regs[BFIN_TIMER_OCTET(timer_id)]->status);
179}
180
192int get_gptimer_intr(unsigned int timer_id) 181int get_gptimer_intr(unsigned int timer_id)
193{ 182{
194 tassert(timer_id < MAX_BLACKFIN_GPTIMERS); 183 tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
195 return !!(group_regs[BFIN_TIMER_OCTET(timer_id)]->status & timil_mask[timer_id]); 184 return !!(read_gptimer_status(timer_id) & timil_mask[timer_id]);
196} 185}
197EXPORT_SYMBOL(get_gptimer_intr); 186EXPORT_SYMBOL(get_gptimer_intr);
198 187
199void clear_gptimer_intr(unsigned int timer_id) 188void clear_gptimer_intr(unsigned int timer_id)
200{ 189{
201 tassert(timer_id < MAX_BLACKFIN_GPTIMERS); 190 tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
202 group_regs[BFIN_TIMER_OCTET(timer_id)]->status = timil_mask[timer_id]; 191 bfin_write(&group_regs[BFIN_TIMER_OCTET(timer_id)]->status, timil_mask[timer_id]);
203} 192}
204EXPORT_SYMBOL(clear_gptimer_intr); 193EXPORT_SYMBOL(clear_gptimer_intr);
205 194
206int get_gptimer_over(unsigned int timer_id) 195int get_gptimer_over(unsigned int timer_id)
207{ 196{
208 tassert(timer_id < MAX_BLACKFIN_GPTIMERS); 197 tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
209 return !!(group_regs[BFIN_TIMER_OCTET(timer_id)]->status & tovf_mask[timer_id]); 198 return !!(read_gptimer_status(timer_id) & tovf_mask[timer_id]);
210} 199}
211EXPORT_SYMBOL(get_gptimer_over); 200EXPORT_SYMBOL(get_gptimer_over);
212 201
213void clear_gptimer_over(unsigned int timer_id) 202void clear_gptimer_over(unsigned int timer_id)
214{ 203{
215 tassert(timer_id < MAX_BLACKFIN_GPTIMERS); 204 tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
216 group_regs[BFIN_TIMER_OCTET(timer_id)]->status = tovf_mask[timer_id]; 205 bfin_write(&group_regs[BFIN_TIMER_OCTET(timer_id)]->status, tovf_mask[timer_id]);
217} 206}
218EXPORT_SYMBOL(clear_gptimer_over); 207EXPORT_SYMBOL(clear_gptimer_over);
219 208
220int get_gptimer_run(unsigned int timer_id) 209int get_gptimer_run(unsigned int timer_id)
221{ 210{
222 tassert(timer_id < MAX_BLACKFIN_GPTIMERS); 211 tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
223 return !!(group_regs[BFIN_TIMER_OCTET(timer_id)]->status & trun_mask[timer_id]); 212 return !!(read_gptimer_status(timer_id) & trun_mask[timer_id]);
224} 213}
225EXPORT_SYMBOL(get_gptimer_run); 214EXPORT_SYMBOL(get_gptimer_run);
226 215
227void set_gptimer_config(unsigned int timer_id, uint16_t config) 216void set_gptimer_config(unsigned int timer_id, uint16_t config)
228{ 217{
229 tassert(timer_id < MAX_BLACKFIN_GPTIMERS); 218 tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
230 timer_regs[timer_id]->config = config; 219 bfin_write(&timer_regs[timer_id]->config, config);
231 SSYNC(); 220 SSYNC();
232} 221}
233EXPORT_SYMBOL(set_gptimer_config); 222EXPORT_SYMBOL(set_gptimer_config);
@@ -235,7 +224,7 @@ EXPORT_SYMBOL(set_gptimer_config);
235uint16_t get_gptimer_config(unsigned int timer_id) 224uint16_t get_gptimer_config(unsigned int timer_id)
236{ 225{
237 tassert(timer_id < MAX_BLACKFIN_GPTIMERS); 226 tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
238 return timer_regs[timer_id]->config; 227 return bfin_read(&timer_regs[timer_id]->config);
239} 228}
240EXPORT_SYMBOL(get_gptimer_config); 229EXPORT_SYMBOL(get_gptimer_config);
241 230
@@ -244,7 +233,7 @@ void enable_gptimers(uint16_t mask)
244 int i; 233 int i;
245 tassert((mask & ~BLACKFIN_GPTIMER_IDMASK) == 0); 234 tassert((mask & ~BLACKFIN_GPTIMER_IDMASK) == 0);
246 for (i = 0; i < BFIN_TIMER_NUM_GROUP; ++i) { 235 for (i = 0; i < BFIN_TIMER_NUM_GROUP; ++i) {
247 group_regs[i]->enable = mask & 0xFF; 236 bfin_write(&group_regs[i]->enable, mask & 0xFF);
248 mask >>= 8; 237 mask >>= 8;
249 } 238 }
250 SSYNC(); 239 SSYNC();
@@ -257,7 +246,7 @@ static void _disable_gptimers(uint16_t mask)
257 uint16_t m = mask; 246 uint16_t m = mask;
258 tassert((mask & ~BLACKFIN_GPTIMER_IDMASK) == 0); 247 tassert((mask & ~BLACKFIN_GPTIMER_IDMASK) == 0);
259 for (i = 0; i < BFIN_TIMER_NUM_GROUP; ++i) { 248 for (i = 0; i < BFIN_TIMER_NUM_GROUP; ++i) {
260 group_regs[i]->disable = m & 0xFF; 249 bfin_write(&group_regs[i]->disable, m & 0xFF);
261 m >>= 8; 250 m >>= 8;
262 } 251 }
263} 252}
@@ -268,7 +257,7 @@ void disable_gptimers(uint16_t mask)
268 _disable_gptimers(mask); 257 _disable_gptimers(mask);
269 for (i = 0; i < MAX_BLACKFIN_GPTIMERS; ++i) 258 for (i = 0; i < MAX_BLACKFIN_GPTIMERS; ++i)
270 if (mask & (1 << i)) 259 if (mask & (1 << i))
271 group_regs[BFIN_TIMER_OCTET(i)]->status = trun_mask[i]; 260 bfin_write(&group_regs[BFIN_TIMER_OCTET(i)]->status, trun_mask[i]);
272 SSYNC(); 261 SSYNC();
273} 262}
274EXPORT_SYMBOL(disable_gptimers); 263EXPORT_SYMBOL(disable_gptimers);
@@ -283,7 +272,7 @@ EXPORT_SYMBOL(disable_gptimers_sync);
283void set_gptimer_pulse_hi(unsigned int timer_id) 272void set_gptimer_pulse_hi(unsigned int timer_id)
284{ 273{
285 tassert(timer_id < MAX_BLACKFIN_GPTIMERS); 274 tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
286 timer_regs[timer_id]->config |= TIMER_PULSE_HI; 275 bfin_write_or(&timer_regs[timer_id]->config, TIMER_PULSE_HI);
287 SSYNC(); 276 SSYNC();
288} 277}
289EXPORT_SYMBOL(set_gptimer_pulse_hi); 278EXPORT_SYMBOL(set_gptimer_pulse_hi);
@@ -291,7 +280,7 @@ EXPORT_SYMBOL(set_gptimer_pulse_hi);
291void clear_gptimer_pulse_hi(unsigned int timer_id) 280void clear_gptimer_pulse_hi(unsigned int timer_id)
292{ 281{
293 tassert(timer_id < MAX_BLACKFIN_GPTIMERS); 282 tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
294 timer_regs[timer_id]->config &= ~TIMER_PULSE_HI; 283 bfin_write_and(&timer_regs[timer_id]->config, ~TIMER_PULSE_HI);
295 SSYNC(); 284 SSYNC();
296} 285}
297EXPORT_SYMBOL(clear_gptimer_pulse_hi); 286EXPORT_SYMBOL(clear_gptimer_pulse_hi);
@@ -301,7 +290,7 @@ uint16_t get_enabled_gptimers(void)
301 int i; 290 int i;
302 uint16_t result = 0; 291 uint16_t result = 0;
303 for (i = 0; i < BFIN_TIMER_NUM_GROUP; ++i) 292 for (i = 0; i < BFIN_TIMER_NUM_GROUP; ++i)
304 result |= (group_regs[i]->enable << (i << 3)); 293 result |= (bfin_read(&group_regs[i]->enable) << (i << 3));
305 return result; 294 return result;
306} 295}
307EXPORT_SYMBOL(get_enabled_gptimers); 296EXPORT_SYMBOL(get_enabled_gptimers);
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index 6a660fa921b5..6a80a9e9fc4a 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -140,7 +140,6 @@ EXPORT_SYMBOL(kernel_thread);
140 */ 140 */
141void start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) 141void start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
142{ 142{
143 set_fs(USER_DS);
144 regs->pc = new_ip; 143 regs->pc = new_ip;
145 if (current->mm) 144 if (current->mm)
146 regs->p5 = current->mm->start_data; 145 regs->p5 = current->mm->start_data;
diff --git a/arch/blackfin/kernel/pwm.c b/arch/blackfin/kernel/pwm.c
new file mode 100644
index 000000000000..33f5942733bd
--- /dev/null
+++ b/arch/blackfin/kernel/pwm.c
@@ -0,0 +1,100 @@
1/*
2 * Blackfin Pulse Width Modulation (PWM) core
3 *
4 * Copyright (c) 2011 Analog Devices Inc.
5 *
6 * Licensed under the GPL-2 or later.
7 */
8
9#include <linux/module.h>
10#include <linux/pwm.h>
11#include <linux/slab.h>
12
13#include <asm/gptimers.h>
14#include <asm/portmux.h>
15
16struct pwm_device {
17 unsigned id;
18 unsigned short pin;
19};
20
21static const unsigned short pwm_to_gptimer_per[] = {
22 P_TMR0, P_TMR1, P_TMR2, P_TMR3, P_TMR4, P_TMR5,
23 P_TMR6, P_TMR7, P_TMR8, P_TMR9, P_TMR10, P_TMR11,
24};
25
26struct pwm_device *pwm_request(int pwm_id, const char *label)
27{
28 struct pwm_device *pwm;
29 int ret;
30
31 /* XXX: pwm_id really should be unsigned */
32 if (pwm_id < 0)
33 return NULL;
34
35 pwm = kzalloc(sizeof(*pwm), GFP_KERNEL);
36 if (!pwm)
37 return pwm;
38
39 pwm->id = pwm_id;
40 if (pwm->id >= ARRAY_SIZE(pwm_to_gptimer_per))
41 goto err;
42
43 pwm->pin = pwm_to_gptimer_per[pwm->id];
44 ret = peripheral_request(pwm->pin, label);
45 if (ret)
46 goto err;
47
48 return pwm;
49 err:
50 kfree(pwm);
51 return NULL;
52}
53EXPORT_SYMBOL(pwm_request);
54
55void pwm_free(struct pwm_device *pwm)
56{
57 peripheral_free(pwm->pin);
58 kfree(pwm);
59}
60EXPORT_SYMBOL(pwm_free);
61
62int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
63{
64 unsigned long period, duty;
65 unsigned long long val;
66
67 if (duty_ns < 0 || duty_ns > period_ns)
68 return -EINVAL;
69
70 val = (unsigned long long)get_sclk() * period_ns;
71 do_div(val, NSEC_PER_SEC);
72 period = val;
73
74 val = (unsigned long long)period * duty_ns;
75 do_div(val, period_ns);
76 duty = period - val;
77
78 if (duty >= period)
79 duty = period - 1;
80
81 set_gptimer_config(pwm->id, TIMER_MODE_PWM | TIMER_PERIOD_CNT);
82 set_gptimer_pwidth(pwm->id, duty);
83 set_gptimer_period(pwm->id, period);
84
85 return 0;
86}
87EXPORT_SYMBOL(pwm_config);
88
89int pwm_enable(struct pwm_device *pwm)
90{
91 enable_gptimer(pwm->id);
92 return 0;
93}
94EXPORT_SYMBOL(pwm_enable);
95
96void pwm_disable(struct pwm_device *pwm)
97{
98 disable_gptimer(pwm->id);
99}
100EXPORT_SYMBOL(pwm_disable);
diff --git a/arch/blackfin/kernel/reboot.c b/arch/blackfin/kernel/reboot.c
index 488bdc51aaa5..c4c0081b1996 100644
--- a/arch/blackfin/kernel/reboot.c
+++ b/arch/blackfin/kernel/reboot.c
@@ -54,7 +54,9 @@ static void bfin_reset(void)
54 54
55 /* The BF526 ROM will crash during reset */ 55 /* The BF526 ROM will crash during reset */
56#if defined(__ADSPBF522__) || defined(__ADSPBF524__) || defined(__ADSPBF526__) 56#if defined(__ADSPBF522__) || defined(__ADSPBF524__) || defined(__ADSPBF526__)
57 bfin_read_SWRST(); 57 /* Seems to be fixed with newer parts though ... */
58 if (__SILICON_REVISION__ < 1 && bfin_revid() < 1)
59 bfin_read_SWRST();
58#endif 60#endif
59 61
60 /* Wait for the SWRST write to complete. Cannot rely on SSYNC 62 /* Wait for the SWRST write to complete. Cannot rely on SSYNC
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index 536bd9d7e0cf..dfa2525a442d 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -54,8 +54,7 @@ EXPORT_SYMBOL(mtd_size);
54#endif 54#endif
55 55
56char __initdata command_line[COMMAND_LINE_SIZE]; 56char __initdata command_line[COMMAND_LINE_SIZE];
57void __initdata *init_retx, *init_saved_retx, *init_saved_seqstat, 57struct blackfin_initial_pda __initdata initial_pda;
58 *init_saved_icplb_fault_addr, *init_saved_dcplb_fault_addr;
59 58
60/* boot memmap, for parsing "memmap=" */ 59/* boot memmap, for parsing "memmap=" */
61#define BFIN_MEMMAP_MAX 128 /* number of entries in bfin_memmap */ 60#define BFIN_MEMMAP_MAX 128 /* number of entries in bfin_memmap */
@@ -957,13 +956,16 @@ void __init setup_arch(char **cmdline_p)
957 printk(KERN_EMERG "Recovering from DOUBLE FAULT event\n"); 956 printk(KERN_EMERG "Recovering from DOUBLE FAULT event\n");
958#ifdef CONFIG_DEBUG_DOUBLEFAULT 957#ifdef CONFIG_DEBUG_DOUBLEFAULT
959 /* We assume the crashing kernel, and the current symbol table match */ 958 /* We assume the crashing kernel, and the current symbol table match */
960 printk(KERN_EMERG " While handling exception (EXCAUSE = 0x%x) at %pF\n", 959 printk(KERN_EMERG " While handling exception (EXCAUSE = %#x) at %pF\n",
961 (int)init_saved_seqstat & SEQSTAT_EXCAUSE, init_saved_retx); 960 initial_pda.seqstat_doublefault & SEQSTAT_EXCAUSE,
962 printk(KERN_NOTICE " DCPLB_FAULT_ADDR: %pF\n", init_saved_dcplb_fault_addr); 961 initial_pda.retx_doublefault);
963 printk(KERN_NOTICE " ICPLB_FAULT_ADDR: %pF\n", init_saved_icplb_fault_addr); 962 printk(KERN_NOTICE " DCPLB_FAULT_ADDR: %pF\n",
963 initial_pda.dcplb_doublefault_addr);
964 printk(KERN_NOTICE " ICPLB_FAULT_ADDR: %pF\n",
965 initial_pda.icplb_doublefault_addr);
964#endif 966#endif
965 printk(KERN_NOTICE " The instruction at %pF caused a double exception\n", 967 printk(KERN_NOTICE " The instruction at %pF caused a double exception\n",
966 init_retx); 968 initial_pda.retx);
967 } else if (_bfin_swrst & RESET_WDOG) 969 } else if (_bfin_swrst & RESET_WDOG)
968 printk(KERN_INFO "Recovering from Watchdog event\n"); 970 printk(KERN_INFO "Recovering from Watchdog event\n");
969 else if (_bfin_swrst & RESET_SOFTWARE) 971 else if (_bfin_swrst & RESET_SOFTWARE)
diff --git a/arch/blackfin/kernel/time.c b/arch/blackfin/kernel/time.c
index 8d73724c0092..ceb2bf63dfe2 100644
--- a/arch/blackfin/kernel/time.c
+++ b/arch/blackfin/kernel/time.c
@@ -51,7 +51,7 @@ void __init setup_core_timer(void)
51 u32 tcount; 51 u32 tcount;
52 52
53 /* power up the timer, but don't enable it just yet */ 53 /* power up the timer, but don't enable it just yet */
54 bfin_write_TCNTL(1); 54 bfin_write_TCNTL(TMPWR);
55 CSYNC(); 55 CSYNC();
56 56
57 /* the TSCALE prescaler counter */ 57 /* the TSCALE prescaler counter */
@@ -64,7 +64,7 @@ void __init setup_core_timer(void)
64 /* now enable the timer */ 64 /* now enable the timer */
65 CSYNC(); 65 CSYNC();
66 66
67 bfin_write_TCNTL(7); 67 bfin_write_TCNTL(TAUTORLD | TMREN | TMPWR);
68} 68}
69#endif 69#endif
70 70
diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S
index 3ac5b66d14aa..ba35864b2b74 100644
--- a/arch/blackfin/kernel/vmlinux.lds.S
+++ b/arch/blackfin/kernel/vmlinux.lds.S
@@ -155,6 +155,7 @@ SECTIONS
155 SECURITY_INITCALL 155 SECURITY_INITCALL
156 INIT_RAM_FS 156 INIT_RAM_FS
157 157
158 . = ALIGN(PAGE_SIZE);
158 ___per_cpu_load = .; 159 ___per_cpu_load = .;
159 PERCPU_INPUT(32) 160 PERCPU_INPUT(32)
160 161
diff --git a/arch/blackfin/mach-bf518/Kconfig b/arch/blackfin/mach-bf518/Kconfig
index 1d9f631a7f94..bde92a19970e 100644
--- a/arch/blackfin/mach-bf518/Kconfig
+++ b/arch/blackfin/mach-bf518/Kconfig
@@ -11,55 +11,75 @@ menu "BF518 Specific Configuration"
11comment "Alternative Multiplexing Scheme" 11comment "Alternative Multiplexing Scheme"
12 12
13choice 13choice
14 prompt "SPORT0" 14 prompt "PWM Channel Pins"
15 default BF518_SPORT0_PORTG 15 default BF518_PWM_ALL_PORTF
16 help 16 help
17 Select PORT used for SPORT0. See Hardware Reference Manual 17 Select pins used for the PWM channels:
18 PWM_AH PWM_AL PWM_BH PWM_BL PWM_CH PWM_CL
18 19
19config BF518_SPORT0_PORTF 20 See the Hardware Reference Manual for more details.
20 bool "PORT F" 21
22config BF518_PWM_ALL_PORTF
23 bool "PF1 - PF6"
21 help 24 help
22 PORT F 25 PF{1,2,3,4,5,6} <-> PWM_{AH,AL,BH,BL,CH,CL}
23 26
24config BF518_SPORT0_PORTG 27config BF518_PWM_PORTF_PORTG
25 bool "PORT G" 28 bool "PF11 - PF14 / PG1 - PG2"
26 help 29 help
27 PORT G 30 PF{11,12,13,14} <-> PWM_{AH,AL,BH,BL}
31 PG{1,2} <-> PWM_{CH,CL}
32
28endchoice 33endchoice
29 34
30choice 35choice
31 prompt "SPORT0 TSCLK Location" 36 prompt "PWM Sync Pin"
32 depends on BF518_SPORT0_PORTG 37 default BF518_PWM_SYNC_PF7
33 default BF518_SPORT0_TSCLK_PG10
34 help 38 help
35 Select PIN used for SPORT0_TSCLK. See Hardware Reference Manual 39 Select the pin used for PWM_SYNC.
36 40
37config BF518_SPORT0_TSCLK_PG10 41 See the Hardware Reference Manual for more details.
38 bool "PORT PG10" 42
39 help 43config BF518_PWM_SYNC_PF7
40 PORT PG10 44 bool "PF7"
45config BF518_PWM_SYNC_PF15
46 bool "PF15"
47endchoice
41 48
42config BF518_SPORT0_TSCLK_PG14 49choice
43 bool "PORT PG14" 50 prompt "PWM Trip B Pin"
51 default BF518_PWM_TRIPB_PG10
44 help 52 help
45 PORT PG14 53 Select the pin used for PWM_TRIPB.
54
55 See the Hardware Reference Manual for more details.
56
57config BF518_PWM_TRIPB_PG10
58 bool "PG10"
59config BF518_PWM_TRIPB_PG14
60 bool "PG14"
46endchoice 61endchoice
47 62
48choice 63choice
49 prompt "UART1" 64 prompt "PPI / Timer Pins"
50 default BF518_UART1_PORTF 65 default BF518_PPI_TMR_PG5
51 help 66 help
52 Select PORT used for UART1. See Hardware Reference Manual 67 Select pins used for PPI/Timer:
68 PPICLK PPIFS1 PPIFS2
69 TMRCLK TMR0 TMR1
53 70
54config BF518_UART1_PORTF 71 See the Hardware Reference Manual for more details.
55 bool "PORT F" 72
73config BF518_PPI_TMR_PG5
74 bool "PG5 - PG7"
56 help 75 help
57 PORT F 76 PG{5,6,7} <-> {PPICLK/TMRCLK,TMR0/PPIFS1,TMR1/PPIFS2}
58 77
59config BF518_UART1_PORTG 78config BF518_PPI_TMR_PG12
60 bool "PORT G" 79 bool "PG12 - PG14"
61 help 80 help
62 PORT G 81 PG{12,13,14} <-> {PPICLK/TMRCLK,TMR0/PPIFS1,TMR1/PPIFS2}
82
63endchoice 83endchoice
64 84
65comment "Hysteresis/Schmitt Trigger Control" 85comment "Hysteresis/Schmitt Trigger Control"
diff --git a/arch/blackfin/mach-bf518/boards/ezbrd.c b/arch/blackfin/mach-bf518/boards/ezbrd.c
index c0ccadcfa44e..d78fc2cc7d16 100644
--- a/arch/blackfin/mach-bf518/boards/ezbrd.c
+++ b/arch/blackfin/mach-bf518/boards/ezbrd.c
@@ -187,43 +187,16 @@ static struct flash_platform_data bfin_spi_flash_data = {
187/* SPI flash chip (m25p64) */ 187/* SPI flash chip (m25p64) */
188static struct bfin5xx_spi_chip spi_flash_chip_info = { 188static struct bfin5xx_spi_chip spi_flash_chip_info = {
189 .enable_dma = 0, /* use dma transfer with this chip*/ 189 .enable_dma = 0, /* use dma transfer with this chip*/
190 .bits_per_word = 8,
191}; 190};
192#endif 191#endif
193 192
194#if defined(CONFIG_BFIN_SPI_ADC) \
195 || defined(CONFIG_BFIN_SPI_ADC_MODULE)
196/* SPI ADC chip */
197static struct bfin5xx_spi_chip spi_adc_chip_info = {
198 .enable_dma = 1, /* use dma transfer with this chip*/
199 .bits_per_word = 16,
200};
201#endif
202
203#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
204#if defined(CONFIG_NET_DSA_KSZ8893M) \
205 || defined(CONFIG_NET_DSA_KSZ8893M_MODULE)
206/* SPI SWITCH CHIP */
207static struct bfin5xx_spi_chip spi_switch_info = {
208 .enable_dma = 0,
209 .bits_per_word = 8,
210};
211#endif
212#endif
213
214#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) 193#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
215static struct bfin5xx_spi_chip mmc_spi_chip_info = { 194static struct bfin5xx_spi_chip mmc_spi_chip_info = {
216 .enable_dma = 0, 195 .enable_dma = 0,
217 .bits_per_word = 8,
218}; 196};
219#endif 197#endif
220 198
221#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE) 199#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
222static struct bfin5xx_spi_chip spi_ad7877_chip_info = {
223 .enable_dma = 0,
224 .bits_per_word = 16,
225};
226
227static const struct ad7877_platform_data bfin_ad7877_ts_info = { 200static const struct ad7877_platform_data bfin_ad7877_ts_info = {
228 .model = 7877, 201 .model = 7877,
229 .vref_delay_usecs = 50, /* internal, no capacitor */ 202 .vref_delay_usecs = 50, /* internal, no capacitor */
@@ -239,21 +212,6 @@ static const struct ad7877_platform_data bfin_ad7877_ts_info = {
239}; 212};
240#endif 213#endif
241 214
242#if defined(CONFIG_SND_SOC_WM8731) || defined(CONFIG_SND_SOC_WM8731_MODULE) \
243 && defined(CONFIG_SND_SOC_WM8731_SPI)
244static struct bfin5xx_spi_chip spi_wm8731_chip_info = {
245 .enable_dma = 0,
246 .bits_per_word = 16,
247};
248#endif
249
250#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
251static struct bfin5xx_spi_chip spidev_chip_info = {
252 .enable_dma = 0,
253 .bits_per_word = 8,
254};
255#endif
256
257static struct spi_board_info bfin_spi_board_info[] __initdata = { 215static struct spi_board_info bfin_spi_board_info[] __initdata = {
258#if defined(CONFIG_MTD_M25P80) \ 216#if defined(CONFIG_MTD_M25P80) \
259 || defined(CONFIG_MTD_M25P80_MODULE) 217 || defined(CONFIG_MTD_M25P80_MODULE)
@@ -269,18 +227,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
269 }, 227 },
270#endif 228#endif
271 229
272#if defined(CONFIG_BFIN_SPI_ADC) \
273 || defined(CONFIG_BFIN_SPI_ADC_MODULE)
274 {
275 .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
276 .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
277 .bus_num = 0, /* Framework bus number */
278 .chip_select = 1, /* Framework chip select. */
279 .platform_data = NULL, /* No spi_driver specific config */
280 .controller_data = &spi_adc_chip_info,
281 },
282#endif
283
284#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) 230#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
285#if defined(CONFIG_NET_DSA_KSZ8893M) \ 231#if defined(CONFIG_NET_DSA_KSZ8893M) \
286 || defined(CONFIG_NET_DSA_KSZ8893M_MODULE) 232 || defined(CONFIG_NET_DSA_KSZ8893M_MODULE)
@@ -290,7 +236,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
290 .bus_num = 0, 236 .bus_num = 0,
291 .chip_select = 1, 237 .chip_select = 1,
292 .platform_data = NULL, 238 .platform_data = NULL,
293 .controller_data = &spi_switch_info,
294 .mode = SPI_MODE_3, 239 .mode = SPI_MODE_3,
295 }, 240 },
296#endif 241#endif
@@ -314,7 +259,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
314 .max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */ 259 .max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */
315 .bus_num = 0, 260 .bus_num = 0,
316 .chip_select = 2, 261 .chip_select = 2,
317 .controller_data = &spi_ad7877_chip_info,
318 }, 262 },
319#endif 263#endif
320#if defined(CONFIG_SND_SOC_WM8731) || defined(CONFIG_SND_SOC_WM8731_MODULE) \ 264#if defined(CONFIG_SND_SOC_WM8731) || defined(CONFIG_SND_SOC_WM8731_MODULE) \
@@ -324,7 +268,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
324 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 268 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
325 .bus_num = 0, 269 .bus_num = 0,
326 .chip_select = 5, 270 .chip_select = 5,
327 .controller_data = &spi_wm8731_chip_info,
328 .mode = SPI_MODE_0, 271 .mode = SPI_MODE_0,
329 }, 272 },
330#endif 273#endif
@@ -334,7 +277,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
334 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 277 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
335 .bus_num = 0, 278 .bus_num = 0,
336 .chip_select = 1, 279 .chip_select = 1,
337 .controller_data = &spidev_chip_info,
338 }, 280 },
339#endif 281#endif
340#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE) 282#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
@@ -343,7 +285,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
343 .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */ 285 .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
344 .bus_num = 0, 286 .bus_num = 0,
345 .chip_select = 1, 287 .chip_select = 1,
346 .controller_data = &lq035q1_spi_chip_info,
347 .mode = SPI_CPHA | SPI_CPOL, 288 .mode = SPI_CPHA | SPI_CPOL,
348 }, 289 },
349#endif 290#endif
diff --git a/arch/blackfin/mach-bf518/boards/tcm-bf518.c b/arch/blackfin/mach-bf518/boards/tcm-bf518.c
index 50fc5c89e379..55c127908815 100644
--- a/arch/blackfin/mach-bf518/boards/tcm-bf518.c
+++ b/arch/blackfin/mach-bf518/boards/tcm-bf518.c
@@ -138,32 +138,16 @@ static struct flash_platform_data bfin_spi_flash_data = {
138/* SPI flash chip (m25p64) */ 138/* SPI flash chip (m25p64) */
139static struct bfin5xx_spi_chip spi_flash_chip_info = { 139static struct bfin5xx_spi_chip spi_flash_chip_info = {
140 .enable_dma = 0, /* use dma transfer with this chip*/ 140 .enable_dma = 0, /* use dma transfer with this chip*/
141 .bits_per_word = 8,
142};
143#endif
144
145#if defined(CONFIG_BFIN_SPI_ADC) \
146 || defined(CONFIG_BFIN_SPI_ADC_MODULE)
147/* SPI ADC chip */
148static struct bfin5xx_spi_chip spi_adc_chip_info = {
149 .enable_dma = 1, /* use dma transfer with this chip*/
150 .bits_per_word = 16,
151}; 141};
152#endif 142#endif
153 143
154#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) 144#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
155static struct bfin5xx_spi_chip mmc_spi_chip_info = { 145static struct bfin5xx_spi_chip mmc_spi_chip_info = {
156 .enable_dma = 0, 146 .enable_dma = 0,
157 .bits_per_word = 8,
158}; 147};
159#endif 148#endif
160 149
161#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE) 150#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
162static struct bfin5xx_spi_chip spi_ad7877_chip_info = {
163 .enable_dma = 0,
164 .bits_per_word = 16,
165};
166
167static const struct ad7877_platform_data bfin_ad7877_ts_info = { 151static const struct ad7877_platform_data bfin_ad7877_ts_info = {
168 .model = 7877, 152 .model = 7877,
169 .vref_delay_usecs = 50, /* internal, no capacitor */ 153 .vref_delay_usecs = 50, /* internal, no capacitor */
@@ -179,21 +163,6 @@ static const struct ad7877_platform_data bfin_ad7877_ts_info = {
179}; 163};
180#endif 164#endif
181 165
182#if defined(CONFIG_SND_SOC_WM8731) || defined(CONFIG_SND_SOC_WM8731_MODULE) \
183 && defined(CONFIG_SND_SOC_WM8731_SPI)
184static struct bfin5xx_spi_chip spi_wm8731_chip_info = {
185 .enable_dma = 0,
186 .bits_per_word = 16,
187};
188#endif
189
190#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
191static struct bfin5xx_spi_chip spidev_chip_info = {
192 .enable_dma = 0,
193 .bits_per_word = 8,
194};
195#endif
196
197static struct spi_board_info bfin_spi_board_info[] __initdata = { 166static struct spi_board_info bfin_spi_board_info[] __initdata = {
198#if defined(CONFIG_MTD_M25P80) \ 167#if defined(CONFIG_MTD_M25P80) \
199 || defined(CONFIG_MTD_M25P80_MODULE) 168 || defined(CONFIG_MTD_M25P80_MODULE)
@@ -209,18 +178,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
209 }, 178 },
210#endif 179#endif
211 180
212#if defined(CONFIG_BFIN_SPI_ADC) \
213 || defined(CONFIG_BFIN_SPI_ADC_MODULE)
214 {
215 .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
216 .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
217 .bus_num = 0, /* Framework bus number */
218 .chip_select = 1, /* Framework chip select. */
219 .platform_data = NULL, /* No spi_driver specific config */
220 .controller_data = &spi_adc_chip_info,
221 },
222#endif
223
224#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) 181#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
225 { 182 {
226 .modalias = "mmc_spi", 183 .modalias = "mmc_spi",
@@ -239,7 +196,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
239 .max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */ 196 .max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */
240 .bus_num = 0, 197 .bus_num = 0,
241 .chip_select = 2, 198 .chip_select = 2,
242 .controller_data = &spi_ad7877_chip_info,
243 }, 199 },
244#endif 200#endif
245#if defined(CONFIG_SND_SOC_WM8731) || defined(CONFIG_SND_SOC_WM8731_MODULE) \ 201#if defined(CONFIG_SND_SOC_WM8731) || defined(CONFIG_SND_SOC_WM8731_MODULE) \
@@ -249,7 +205,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
249 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 205 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
250 .bus_num = 0, 206 .bus_num = 0,
251 .chip_select = 5, 207 .chip_select = 5,
252 .controller_data = &spi_wm8731_chip_info,
253 .mode = SPI_MODE_0, 208 .mode = SPI_MODE_0,
254 }, 209 },
255#endif 210#endif
@@ -259,7 +214,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
259 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 214 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
260 .bus_num = 0, 215 .bus_num = 0,
261 .chip_select = 1, 216 .chip_select = 1,
262 .controller_data = &spidev_chip_info,
263 }, 217 },
264#endif 218#endif
265#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE) 219#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
@@ -268,7 +222,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
268 .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */ 222 .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
269 .bus_num = 0, 223 .bus_num = 0,
270 .chip_select = 1, 224 .chip_select = 1,
271 .controller_data = &lq035q1_spi_chip_info,
272 .mode = SPI_CPHA | SPI_CPOL, 225 .mode = SPI_CPHA | SPI_CPOL,
273 }, 226 },
274#endif 227#endif
diff --git a/arch/blackfin/mach-bf518/include/mach/anomaly.h b/arch/blackfin/mach-bf518/include/mach/anomaly.h
index d2f076fbbc9e..56383f7cbc07 100644
--- a/arch/blackfin/mach-bf518/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf518/include/mach/anomaly.h
@@ -11,10 +11,9 @@
11 */ 11 */
12 12
13/* This file should be up to date with: 13/* This file should be up to date with:
14 * - Revision E, 01/26/2010; ADSP-BF512/BF514/BF516/BF518 Blackfin Processor Anomaly List 14 * - Revision F, 05/23/2011; ADSP-BF512/BF514/BF516/BF518 Blackfin Processor Anomaly List
15 */ 15 */
16 16
17/* We plan on not supporting 0.0 silicon, but 0.1 isn't out yet - sorry */
18#if __SILICON_REVISION__ < 0 17#if __SILICON_REVISION__ < 0
19# error will not work on BF518 silicon version 18# error will not work on BF518 silicon version
20#endif 19#endif
@@ -77,19 +76,29 @@
77/* False Hardware Error when RETI Points to Invalid Memory */ 76/* False Hardware Error when RETI Points to Invalid Memory */
78#define ANOMALY_05000461 (1) 77#define ANOMALY_05000461 (1)
79/* Synchronization Problem at Startup May Cause SPORT Transmit Channels to Misalign */ 78/* Synchronization Problem at Startup May Cause SPORT Transmit Channels to Misalign */
80#define ANOMALY_05000462 (1) 79#define ANOMALY_05000462 (__SILICON_REVISION__ < 2)
81/* PLL Latches Incorrect Settings During Reset */
82#define ANOMALY_05000469 (1)
83/* Incorrect Default MSEL Value in PLL_CTL */ 80/* Incorrect Default MSEL Value in PLL_CTL */
84#define ANOMALY_05000472 (1) 81#define ANOMALY_05000472 (__SILICON_REVISION__ < 2)
85/* Interrupted SPORT Receive Data Register Read Results In Underflow when SLEN > 15 */ 82/* Interrupted SPORT Receive Data Register Read Results In Underflow when SLEN > 15 */
86#define ANOMALY_05000473 (1) 83#define ANOMALY_05000473 (1)
87/* TESTSET Instruction Cannot Be Interrupted */ 84/* TESTSET Instruction Cannot Be Interrupted */
88#define ANOMALY_05000477 (1) 85#define ANOMALY_05000477 (1)
89/* Reads of ITEST_COMMAND and ITEST_DATA Registers Cause Cache Corruption */ 86/* Reads of ITEST_COMMAND and ITEST_DATA Registers Cause Cache Corruption */
90#define ANOMALY_05000481 (1) 87#define ANOMALY_05000481 (1)
91/* IFLUSH sucks at life */ 88/* PLL Latches Incorrect Settings During Reset */
89#define ANOMALY_05000482 (__SILICON_REVISION__ < 2)
90/* PLL_CTL Change Using bfrom_SysControl() Can Result in Processor Overclocking */
91#define ANOMALY_05000485 (__SILICON_REVISION__ < 2)
92/* SPI Master Boot Can Fail Under Certain Conditions */
93#define ANOMALY_05000490 (1)
94/* Instruction Memory Stalls Can Cause IFLUSH to Fail */
92#define ANOMALY_05000491 (1) 95#define ANOMALY_05000491 (1)
96/* EXCPT Instruction May Be Lost If NMI Happens Simultaneously */
97#define ANOMALY_05000494 (1)
98/* CNT_COMMAND Functionality Depends on CNT_IMASK Configuration */
99#define ANOMALY_05000498 (1)
100/* RXS Bit in SPI_STAT May Become Stuck In RX DMA Modes */
101#define ANOMALY_05000501 (1)
93 102
94/* Anomalies that don't exist on this proc */ 103/* Anomalies that don't exist on this proc */
95#define ANOMALY_05000099 (0) 104#define ANOMALY_05000099 (0)
@@ -157,6 +166,5 @@
157#define ANOMALY_05000474 (0) 166#define ANOMALY_05000474 (0)
158#define ANOMALY_05000475 (0) 167#define ANOMALY_05000475 (0)
159#define ANOMALY_05000480 (0) 168#define ANOMALY_05000480 (0)
160#define ANOMALY_05000485 (0)
161 169
162#endif 170#endif
diff --git a/arch/blackfin/mach-bf518/include/mach/portmux.h b/arch/blackfin/mach-bf518/include/mach/portmux.h
index cd84a569b04e..b3b806f468da 100644
--- a/arch/blackfin/mach-bf518/include/mach/portmux.h
+++ b/arch/blackfin/mach-bf518/include/mach/portmux.h
@@ -81,9 +81,15 @@
81#define P_PPI0_D14 (P_DEFINED | P_IDENT(GPIO_PF14) | P_FUNCT(1)) 81#define P_PPI0_D14 (P_DEFINED | P_IDENT(GPIO_PF14) | P_FUNCT(1))
82#define P_PPI0_D15 (P_DEFINED | P_IDENT(GPIO_PF15) | P_FUNCT(1)) 82#define P_PPI0_D15 (P_DEFINED | P_IDENT(GPIO_PF15) | P_FUNCT(1))
83 83
84#ifndef CONFIG_BF518_PPI_TMR_PG12
85#define P_PPI0_CLK (P_DEFINED | P_IDENT(GPIO_PG5) | P_FUNCT(1))
86#define P_PPI0_FS1 (P_DEFINED | P_IDENT(GPIO_PG6) | P_FUNCT(1))
87#define P_PPI0_FS2 (P_DEFINED | P_IDENT(GPIO_PG7) | P_FUNCT(1))
88#else
84#define P_PPI0_CLK (P_DEFINED | P_IDENT(GPIO_PG12) | P_FUNCT(1)) 89#define P_PPI0_CLK (P_DEFINED | P_IDENT(GPIO_PG12) | P_FUNCT(1))
85#define P_PPI0_FS1 (P_DEFINED | P_IDENT(GPIO_PG13) | P_FUNCT(1)) 90#define P_PPI0_FS1 (P_DEFINED | P_IDENT(GPIO_PG13) | P_FUNCT(1))
86#define P_PPI0_FS2 (P_DEFINED | P_IDENT(GPIO_PG14) | P_FUNCT(1)) 91#define P_PPI0_FS2 (P_DEFINED | P_IDENT(GPIO_PG14) | P_FUNCT(1))
92#endif
87#define P_PPI0_FS3 (P_DEFINED | P_IDENT(GPIO_PG15) | P_FUNCT(1)) 93#define P_PPI0_FS3 (P_DEFINED | P_IDENT(GPIO_PG15) | P_FUNCT(1))
88 94
89/* SPI Port Mux */ 95/* SPI Port Mux */
@@ -139,9 +145,15 @@
139#define P_UART1_RX (P_DEFINED | P_IDENT(GPIO_PH7) | P_FUNCT(1)) 145#define P_UART1_RX (P_DEFINED | P_IDENT(GPIO_PH7) | P_FUNCT(1))
140 146
141/* Timer */ 147/* Timer */
148#ifndef CONFIG_BF518_PPI_TMR_PG12
142#define P_TMRCLK (P_DEFINED | P_IDENT(GPIO_PG5) | P_FUNCT(2)) 149#define P_TMRCLK (P_DEFINED | P_IDENT(GPIO_PG5) | P_FUNCT(2))
143#define P_TMR0 (P_DEFINED | P_IDENT(GPIO_PG6) | P_FUNCT(2)) 150#define P_TMR0 (P_DEFINED | P_IDENT(GPIO_PG6) | P_FUNCT(2))
144#define P_TMR1 (P_DEFINED | P_IDENT(GPIO_PG7) | P_FUNCT(2)) 151#define P_TMR1 (P_DEFINED | P_IDENT(GPIO_PG7) | P_FUNCT(2))
152#else
153#define P_TMRCLK (P_DEFINED | P_IDENT(GPIO_PG12) | P_FUNCT(2))
154#define P_TMR0 (P_DEFINED | P_IDENT(GPIO_PG13) | P_FUNCT(2))
155#define P_TMR1 (P_DEFINED | P_IDENT(GPIO_PG14) | P_FUNCT(2))
156#endif
145#define P_TMR2 (P_DEFINED | P_IDENT(GPIO_PF9) | P_FUNCT(2)) 157#define P_TMR2 (P_DEFINED | P_IDENT(GPIO_PF9) | P_FUNCT(2))
146#define P_TMR3 (P_DEFINED | P_IDENT(GPIO_PF10) | P_FUNCT(2)) 158#define P_TMR3 (P_DEFINED | P_IDENT(GPIO_PF10) | P_FUNCT(2))
147#define P_TMR4 (P_DEFINED | P_IDENT(GPIO_PG9) | P_FUNCT(2)) 159#define P_TMR4 (P_DEFINED | P_IDENT(GPIO_PG9) | P_FUNCT(2))
@@ -158,23 +170,33 @@
158#define P_TWI0_SDA (P_DONTCARE) 170#define P_TWI0_SDA (P_DONTCARE)
159 171
160/* PWM */ 172/* PWM */
161#define P_PWM0_AH (P_DEFINED | P_IDENT(GPIO_PF1) | P_FUNCT(2)) 173#ifndef CONFIG_BF518_PWM_PORTF_PORTG
162#define P_PWM0_AL (P_DEFINED | P_IDENT(GPIO_PF2) | P_FUNCT(2)) 174#define P_PWM_AH (P_DEFINED | P_IDENT(GPIO_PF1) | P_FUNCT(2))
163#define P_PWM0_BH (P_DEFINED | P_IDENT(GPIO_PF3) | P_FUNCT(2)) 175#define P_PWM_AL (P_DEFINED | P_IDENT(GPIO_PF2) | P_FUNCT(2))
164#define P_PWM0_BL (P_DEFINED | P_IDENT(GPIO_PF4) | P_FUNCT(2)) 176#define P_PWM_BH (P_DEFINED | P_IDENT(GPIO_PF3) | P_FUNCT(2))
165#define P_PWM0_CH (P_DEFINED | P_IDENT(GPIO_PF5) | P_FUNCT(2)) 177#define P_PWM_BL (P_DEFINED | P_IDENT(GPIO_PF4) | P_FUNCT(2))
166#define P_PWM0_CL (P_DEFINED | P_IDENT(GPIO_PF6) | P_FUNCT(2)) 178#define P_PWM_CH (P_DEFINED | P_IDENT(GPIO_PF5) | P_FUNCT(2))
167#define P_PWM0_SYNC (P_DEFINED | P_IDENT(GPIO_PF7) | P_FUNCT(2)) 179#define P_PWM_CL (P_DEFINED | P_IDENT(GPIO_PF6) | P_FUNCT(2))
168 180#else
169#define P_PWM1_AH (P_DEFINED | P_IDENT(GPIO_PF11) | P_FUNCT(2)) 181#define P_PWM_AH (P_DEFINED | P_IDENT(GPIO_PF11) | P_FUNCT(2))
170#define P_PWM1_AL (P_DEFINED | P_IDENT(GPIO_PF12) | P_FUNCT(2)) 182#define P_PWM_AL (P_DEFINED | P_IDENT(GPIO_PF12) | P_FUNCT(2))
171#define P_PWM1_BH (P_DEFINED | P_IDENT(GPIO_PF13) | P_FUNCT(2)) 183#define P_PWM_BH (P_DEFINED | P_IDENT(GPIO_PF13) | P_FUNCT(2))
172#define P_PWM1_BL (P_DEFINED | P_IDENT(GPIO_PF14) | P_FUNCT(2)) 184#define P_PWM_BL (P_DEFINED | P_IDENT(GPIO_PF14) | P_FUNCT(2))
173#define P_PWM1_CH (P_DEFINED | P_IDENT(GPIO_PG1) | P_FUNCT(2)) 185#define P_PWM_CH (P_DEFINED | P_IDENT(GPIO_PG1) | P_FUNCT(2))
174#define P_PWM1_CL (P_DEFINED | P_IDENT(GPIO_PG2) | P_FUNCT(2)) 186#define P_PWM_CL (P_DEFINED | P_IDENT(GPIO_PG2) | P_FUNCT(2))
175#define P_PWM1_SYNC (P_DEFINED | P_IDENT(GPIO_PF15) | P_FUNCT(2)) 187#endif
176 188
189#ifndef CONFIG_BF518_PWM_SYNC_PF15
190#define P_PWM_SYNC (P_DEFINED | P_IDENT(GPIO_PF7) | P_FUNCT(2))
191#else
192#define P_PWM_SYNC (P_DEFINED | P_IDENT(GPIO_PF15) | P_FUNCT(2))
193#endif
194
195#ifndef CONFIG_BF518_PWM_TRIPB_PG14
196#define P_PWM_TRIPB (P_DEFINED | P_IDENT(GPIO_PG10) | P_FUNCT(2))
197#else
177#define P_PWM_TRIPB (P_DEFINED | P_IDENT(GPIO_PG14) | P_FUNCT(2)) 198#define P_PWM_TRIPB (P_DEFINED | P_IDENT(GPIO_PG14) | P_FUNCT(2))
199#endif
178 200
179/* RSI */ 201/* RSI */
180#define P_RSI_DATA0 (P_DEFINED | P_IDENT(GPIO_PG3) | P_FUNCT(1)) 202#define P_RSI_DATA0 (P_DEFINED | P_IDENT(GPIO_PG3) | P_FUNCT(1))
diff --git a/arch/blackfin/mach-bf527/boards/ad7160eval.c b/arch/blackfin/mach-bf527/boards/ad7160eval.c
index ccab4c689dc3..c04df43f6391 100644
--- a/arch/blackfin/mach-bf527/boards/ad7160eval.c
+++ b/arch/blackfin/mach-bf527/boards/ad7160eval.c
@@ -265,29 +265,12 @@ static struct flash_platform_data bfin_spi_flash_data = {
265/* SPI flash chip (m25p64) */ 265/* SPI flash chip (m25p64) */
266static struct bfin5xx_spi_chip spi_flash_chip_info = { 266static struct bfin5xx_spi_chip spi_flash_chip_info = {
267 .enable_dma = 0, /* use dma transfer with this chip*/ 267 .enable_dma = 0, /* use dma transfer with this chip*/
268 .bits_per_word = 8,
269};
270#endif
271
272#if defined(CONFIG_SND_BF5XX_SOC_AD183X) \
273 || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
274static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
275 .enable_dma = 0,
276 .bits_per_word = 16,
277}; 268};
278#endif 269#endif
279 270
280#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) 271#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
281static struct bfin5xx_spi_chip mmc_spi_chip_info = { 272static struct bfin5xx_spi_chip mmc_spi_chip_info = {
282 .enable_dma = 0, 273 .enable_dma = 0,
283 .bits_per_word = 8,
284};
285#endif
286
287#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
288static struct bfin5xx_spi_chip spidev_chip_info = {
289 .enable_dma = 0,
290 .bits_per_word = 8,
291}; 274};
292#endif 275#endif
293 276
@@ -328,7 +311,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
328 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 311 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
329 .bus_num = 0, 312 .bus_num = 0,
330 .chip_select = 4, 313 .chip_select = 4,
331 .controller_data = &ad1836_spi_chip_info,
332 }, 314 },
333#endif 315#endif
334#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) 316#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
@@ -347,7 +329,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
347 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 329 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
348 .bus_num = 0, 330 .bus_num = 0,
349 .chip_select = 1, 331 .chip_select = 1,
350 .controller_data = &spidev_chip_info,
351 }, 332 },
352#endif 333#endif
353}; 334};
diff --git a/arch/blackfin/mach-bf527/boards/cm_bf527.c b/arch/blackfin/mach-bf527/boards/cm_bf527.c
index c9d6dc88f0e6..6400341cc230 100644
--- a/arch/blackfin/mach-bf527/boards/cm_bf527.c
+++ b/arch/blackfin/mach-bf527/boards/cm_bf527.c
@@ -354,40 +354,16 @@ static struct flash_platform_data bfin_spi_flash_data = {
354/* SPI flash chip (m25p64) */ 354/* SPI flash chip (m25p64) */
355static struct bfin5xx_spi_chip spi_flash_chip_info = { 355static struct bfin5xx_spi_chip spi_flash_chip_info = {
356 .enable_dma = 0, /* use dma transfer with this chip*/ 356 .enable_dma = 0, /* use dma transfer with this chip*/
357 .bits_per_word = 8,
358};
359#endif
360
361#if defined(CONFIG_BFIN_SPI_ADC) \
362 || defined(CONFIG_BFIN_SPI_ADC_MODULE)
363/* SPI ADC chip */
364static struct bfin5xx_spi_chip spi_adc_chip_info = {
365 .enable_dma = 1, /* use dma transfer with this chip*/
366 .bits_per_word = 16,
367};
368#endif
369
370#if defined(CONFIG_SND_BF5XX_SOC_AD183X) \
371 || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
372static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
373 .enable_dma = 0,
374 .bits_per_word = 16,
375}; 357};
376#endif 358#endif
377 359
378#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) 360#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
379static struct bfin5xx_spi_chip mmc_spi_chip_info = { 361static struct bfin5xx_spi_chip mmc_spi_chip_info = {
380 .enable_dma = 0, 362 .enable_dma = 0,
381 .bits_per_word = 8,
382}; 363};
383#endif 364#endif
384 365
385#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE) 366#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
386static struct bfin5xx_spi_chip spi_ad7877_chip_info = {
387 .enable_dma = 0,
388 .bits_per_word = 16,
389};
390
391static const struct ad7877_platform_data bfin_ad7877_ts_info = { 367static const struct ad7877_platform_data bfin_ad7877_ts_info = {
392 .model = 7877, 368 .model = 7877,
393 .vref_delay_usecs = 50, /* internal, no capacitor */ 369 .vref_delay_usecs = 50, /* internal, no capacitor */
@@ -403,21 +379,6 @@ static const struct ad7877_platform_data bfin_ad7877_ts_info = {
403}; 379};
404#endif 380#endif
405 381
406#if defined(CONFIG_SND_SOC_WM8731) || defined(CONFIG_SND_SOC_WM8731_MODULE) \
407 && defined(CONFIG_SND_SOC_WM8731_SPI)
408static struct bfin5xx_spi_chip spi_wm8731_chip_info = {
409 .enable_dma = 0,
410 .bits_per_word = 16,
411};
412#endif
413
414#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
415static struct bfin5xx_spi_chip spidev_chip_info = {
416 .enable_dma = 0,
417 .bits_per_word = 8,
418};
419#endif
420
421static struct spi_board_info bfin_spi_board_info[] __initdata = { 382static struct spi_board_info bfin_spi_board_info[] __initdata = {
422#if defined(CONFIG_MTD_M25P80) \ 383#if defined(CONFIG_MTD_M25P80) \
423 || defined(CONFIG_MTD_M25P80_MODULE) 384 || defined(CONFIG_MTD_M25P80_MODULE)
@@ -433,18 +394,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
433 }, 394 },
434#endif 395#endif
435 396
436#if defined(CONFIG_BFIN_SPI_ADC) \
437 || defined(CONFIG_BFIN_SPI_ADC_MODULE)
438 {
439 .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
440 .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
441 .bus_num = 0, /* Framework bus number */
442 .chip_select = 1, /* Framework chip select. */
443 .platform_data = NULL, /* No spi_driver specific config */
444 .controller_data = &spi_adc_chip_info,
445 },
446#endif
447
448#if defined(CONFIG_SND_BF5XX_SOC_AD183X) \ 397#if defined(CONFIG_SND_BF5XX_SOC_AD183X) \
449 || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE) 398 || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
450 { 399 {
@@ -452,7 +401,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
452 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 401 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
453 .bus_num = 0, 402 .bus_num = 0,
454 .chip_select = 4, 403 .chip_select = 4,
455 .controller_data = &ad1836_spi_chip_info,
456 }, 404 },
457#endif 405#endif
458#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) 406#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
@@ -473,7 +421,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
473 .max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */ 421 .max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */
474 .bus_num = 0, 422 .bus_num = 0,
475 .chip_select = 2, 423 .chip_select = 2,
476 .controller_data = &spi_ad7877_chip_info,
477 }, 424 },
478#endif 425#endif
479#if defined(CONFIG_SND_SOC_WM8731) || defined(CONFIG_SND_SOC_WM8731_MODULE) \ 426#if defined(CONFIG_SND_SOC_WM8731) || defined(CONFIG_SND_SOC_WM8731_MODULE) \
@@ -483,7 +430,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
483 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 430 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
484 .bus_num = 0, 431 .bus_num = 0,
485 .chip_select = 5, 432 .chip_select = 5,
486 .controller_data = &spi_wm8731_chip_info,
487 .mode = SPI_MODE_0, 433 .mode = SPI_MODE_0,
488 }, 434 },
489#endif 435#endif
@@ -493,7 +439,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
493 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 439 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
494 .bus_num = 0, 440 .bus_num = 0,
495 .chip_select = 1, 441 .chip_select = 1,
496 .controller_data = &spidev_chip_info,
497 }, 442 },
498#endif 443#endif
499}; 444};
diff --git a/arch/blackfin/mach-bf527/boards/ezbrd.c b/arch/blackfin/mach-bf527/boards/ezbrd.c
index b7101aa6e3aa..6dbb1b403763 100644
--- a/arch/blackfin/mach-bf527/boards/ezbrd.c
+++ b/arch/blackfin/mach-bf527/boards/ezbrd.c
@@ -253,32 +253,16 @@ static struct flash_platform_data bfin_spi_flash_data = {
253/* SPI flash chip (sst25wf040) */ 253/* SPI flash chip (sst25wf040) */
254static struct bfin5xx_spi_chip spi_flash_chip_info = { 254static struct bfin5xx_spi_chip spi_flash_chip_info = {
255 .enable_dma = 0, /* use dma transfer with this chip*/ 255 .enable_dma = 0, /* use dma transfer with this chip*/
256 .bits_per_word = 8,
257};
258#endif
259
260#if defined(CONFIG_BFIN_SPI_ADC) \
261 || defined(CONFIG_BFIN_SPI_ADC_MODULE)
262/* SPI ADC chip */
263static struct bfin5xx_spi_chip spi_adc_chip_info = {
264 .enable_dma = 1, /* use dma transfer with this chip*/
265 .bits_per_word = 16,
266}; 256};
267#endif 257#endif
268 258
269#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) 259#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
270static struct bfin5xx_spi_chip mmc_spi_chip_info = { 260static struct bfin5xx_spi_chip mmc_spi_chip_info = {
271 .enable_dma = 0, 261 .enable_dma = 0,
272 .bits_per_word = 8,
273}; 262};
274#endif 263#endif
275 264
276#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE) 265#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
277static struct bfin5xx_spi_chip spi_ad7877_chip_info = {
278 .enable_dma = 0,
279 .bits_per_word = 16,
280};
281
282static const struct ad7877_platform_data bfin_ad7877_ts_info = { 266static const struct ad7877_platform_data bfin_ad7877_ts_info = {
283 .model = 7877, 267 .model = 7877,
284 .vref_delay_usecs = 50, /* internal, no capacitor */ 268 .vref_delay_usecs = 50, /* internal, no capacitor */
@@ -311,35 +295,6 @@ static const struct ad7879_platform_data bfin_ad7879_ts_info = {
311}; 295};
312#endif 296#endif
313 297
314#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE)
315static struct bfin5xx_spi_chip spi_ad7879_chip_info = {
316 .enable_dma = 0,
317 .bits_per_word = 16,
318};
319#endif
320
321#if defined(CONFIG_SND_SOC_WM8731) || defined(CONFIG_SND_SOC_WM8731_MODULE) \
322 && defined(CONFIG_SND_SOC_WM8731_SPI)
323static struct bfin5xx_spi_chip spi_wm8731_chip_info = {
324 .enable_dma = 0,
325 .bits_per_word = 16,
326};
327#endif
328
329#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
330static struct bfin5xx_spi_chip spidev_chip_info = {
331 .enable_dma = 0,
332 .bits_per_word = 8,
333};
334#endif
335
336#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
337static struct bfin5xx_spi_chip lq035q1_spi_chip_info = {
338 .enable_dma = 0,
339 .bits_per_word = 8,
340};
341#endif
342
343static struct spi_board_info bfin_spi_board_info[] __initdata = { 298static struct spi_board_info bfin_spi_board_info[] __initdata = {
344#if defined(CONFIG_MTD_M25P80) \ 299#if defined(CONFIG_MTD_M25P80) \
345 || defined(CONFIG_MTD_M25P80_MODULE) 300 || defined(CONFIG_MTD_M25P80_MODULE)
@@ -355,18 +310,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
355 }, 310 },
356#endif 311#endif
357 312
358#if defined(CONFIG_BFIN_SPI_ADC) \
359 || defined(CONFIG_BFIN_SPI_ADC_MODULE)
360 {
361 .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
362 .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
363 .bus_num = 0, /* Framework bus number */
364 .chip_select = 1, /* Framework chip select. */
365 .platform_data = NULL, /* No spi_driver specific config */
366 .controller_data = &spi_adc_chip_info,
367 },
368#endif
369
370#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) 313#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
371 { 314 {
372 .modalias = "mmc_spi", 315 .modalias = "mmc_spi",
@@ -385,7 +328,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
385 .max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */ 328 .max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */
386 .bus_num = 0, 329 .bus_num = 0,
387 .chip_select = 2, 330 .chip_select = 2,
388 .controller_data = &spi_ad7877_chip_info,
389 }, 331 },
390#endif 332#endif
391#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE) 333#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE)
@@ -396,7 +338,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
396 .max_speed_hz = 5000000, /* max spi clock (SCK) speed in HZ */ 338 .max_speed_hz = 5000000, /* max spi clock (SCK) speed in HZ */
397 .bus_num = 0, 339 .bus_num = 0,
398 .chip_select = 5, 340 .chip_select = 5,
399 .controller_data = &spi_ad7879_chip_info,
400 .mode = SPI_CPHA | SPI_CPOL, 341 .mode = SPI_CPHA | SPI_CPOL,
401 }, 342 },
402#endif 343#endif
@@ -407,7 +348,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
407 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 348 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
408 .bus_num = 0, 349 .bus_num = 0,
409 .chip_select = 5, 350 .chip_select = 5,
410 .controller_data = &spi_wm8731_chip_info,
411 .mode = SPI_MODE_0, 351 .mode = SPI_MODE_0,
412 }, 352 },
413#endif 353#endif
@@ -417,7 +357,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
417 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 357 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
418 .bus_num = 0, 358 .bus_num = 0,
419 .chip_select = 1, 359 .chip_select = 1,
420 .controller_data = &spidev_chip_info,
421 }, 360 },
422#endif 361#endif
423#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE) 362#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
@@ -426,7 +365,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
426 .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */ 365 .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
427 .bus_num = 0, 366 .bus_num = 0,
428 .chip_select = 1, 367 .chip_select = 1,
429 .controller_data = &lq035q1_spi_chip_info,
430 .mode = SPI_CPHA | SPI_CPOL, 368 .mode = SPI_CPHA | SPI_CPOL,
431 }, 369 },
432#endif 370#endif
diff --git a/arch/blackfin/mach-bf527/boards/ezkit.c b/arch/blackfin/mach-bf527/boards/ezkit.c
index e67ac7720668..4e9dc9cf8241 100644
--- a/arch/blackfin/mach-bf527/boards/ezkit.c
+++ b/arch/blackfin/mach-bf527/boards/ezkit.c
@@ -409,6 +409,9 @@ static struct resource net2272_bfin_resources[] = {
409 .end = 0x20300000 + 0x100, 409 .end = 0x20300000 + 0x100,
410 .flags = IORESOURCE_MEM, 410 .flags = IORESOURCE_MEM,
411 }, { 411 }, {
412 .start = 1,
413 .flags = IORESOURCE_BUS,
414 }, {
412 .start = IRQ_PF7, 415 .start = IRQ_PF7,
413 .end = IRQ_PF7, 416 .end = IRQ_PF7,
414 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, 417 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
@@ -448,40 +451,16 @@ static struct flash_platform_data bfin_spi_flash_data = {
448/* SPI flash chip (m25p64) */ 451/* SPI flash chip (m25p64) */
449static struct bfin5xx_spi_chip spi_flash_chip_info = { 452static struct bfin5xx_spi_chip spi_flash_chip_info = {
450 .enable_dma = 0, /* use dma transfer with this chip*/ 453 .enable_dma = 0, /* use dma transfer with this chip*/
451 .bits_per_word = 8,
452};
453#endif
454
455#if defined(CONFIG_BFIN_SPI_ADC) \
456 || defined(CONFIG_BFIN_SPI_ADC_MODULE)
457/* SPI ADC chip */
458static struct bfin5xx_spi_chip spi_adc_chip_info = {
459 .enable_dma = 1, /* use dma transfer with this chip*/
460 .bits_per_word = 16,
461};
462#endif
463
464#if defined(CONFIG_SND_BF5XX_SOC_AD183X) \
465 || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
466static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
467 .enable_dma = 0,
468 .bits_per_word = 16,
469}; 454};
470#endif 455#endif
471 456
472#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) 457#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
473static struct bfin5xx_spi_chip mmc_spi_chip_info = { 458static struct bfin5xx_spi_chip mmc_spi_chip_info = {
474 .enable_dma = 0, 459 .enable_dma = 0,
475 .bits_per_word = 8,
476}; 460};
477#endif 461#endif
478 462
479#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE) 463#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
480static struct bfin5xx_spi_chip spi_ad7877_chip_info = {
481 .enable_dma = 0,
482 .bits_per_word = 16,
483};
484
485static const struct ad7877_platform_data bfin_ad7877_ts_info = { 464static const struct ad7877_platform_data bfin_ad7877_ts_info = {
486 .model = 7877, 465 .model = 7877,
487 .vref_delay_usecs = 50, /* internal, no capacitor */ 466 .vref_delay_usecs = 50, /* internal, no capacitor */
@@ -513,20 +492,6 @@ static const struct ad7879_platform_data bfin_ad7879_ts_info = {
513}; 492};
514#endif 493#endif
515 494
516#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE)
517static struct bfin5xx_spi_chip spi_ad7879_chip_info = {
518 .enable_dma = 0,
519 .bits_per_word = 16,
520};
521#endif
522
523#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
524static struct bfin5xx_spi_chip spidev_chip_info = {
525 .enable_dma = 0,
526 .bits_per_word = 8,
527};
528#endif
529
530#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) || \ 495#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) || \
531 defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) 496 defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE)
532 497
@@ -574,9 +539,25 @@ static struct resource bfin_snd_resources[][4] = {
574 BFIN_SND_RES(0), 539 BFIN_SND_RES(0),
575 BFIN_SND_RES(1), 540 BFIN_SND_RES(1),
576}; 541};
542#endif
577 543
578static struct platform_device bfin_pcm = { 544#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
579 .name = "bfin-pcm-audio", 545static struct platform_device bfin_i2s_pcm = {
546 .name = "bfin-i2s-pcm-audio",
547 .id = -1,
548};
549#endif
550
551#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE)
552static struct platform_device bfin_tdm_pcm = {
553 .name = "bfin-tdm-pcm-audio",
554 .id = -1,
555};
556#endif
557
558#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE)
559static struct platform_device bfin_ac97_pcm = {
560 .name = "bfin-ac97-pcm-audio",
580 .id = -1, 561 .id = -1,
581}; 562};
582#endif 563#endif
@@ -605,13 +586,6 @@ static struct platform_device bfin_tdm = {
605}; 586};
606#endif 587#endif
607 588
608#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
609static struct bfin5xx_spi_chip lq035q1_spi_chip_info = {
610 .enable_dma = 0,
611 .bits_per_word = 8,
612};
613#endif
614
615static struct spi_board_info bfin_spi_board_info[] __initdata = { 589static struct spi_board_info bfin_spi_board_info[] __initdata = {
616#if defined(CONFIG_MTD_M25P80) \ 590#if defined(CONFIG_MTD_M25P80) \
617 || defined(CONFIG_MTD_M25P80_MODULE) 591 || defined(CONFIG_MTD_M25P80_MODULE)
@@ -627,18 +601,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
627 }, 601 },
628#endif 602#endif
629 603
630#if defined(CONFIG_BFIN_SPI_ADC) \
631 || defined(CONFIG_BFIN_SPI_ADC_MODULE)
632 {
633 .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
634 .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
635 .bus_num = 0, /* Framework bus number */
636 .chip_select = 1, /* Framework chip select. */
637 .platform_data = NULL, /* No spi_driver specific config */
638 .controller_data = &spi_adc_chip_info,
639 },
640#endif
641
642#if defined(CONFIG_SND_BF5XX_SOC_AD183X) \ 604#if defined(CONFIG_SND_BF5XX_SOC_AD183X) \
643 || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE) 605 || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
644 { 606 {
@@ -647,7 +609,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
647 .bus_num = 0, 609 .bus_num = 0,
648 .chip_select = 4, 610 .chip_select = 4,
649 .platform_data = "ad1836", 611 .platform_data = "ad1836",
650 .controller_data = &ad1836_spi_chip_info,
651 .mode = SPI_MODE_3, 612 .mode = SPI_MODE_3,
652 }, 613 },
653#endif 614#endif
@@ -670,7 +631,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
670 .max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */ 631 .max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */
671 .bus_num = 0, 632 .bus_num = 0,
672 .chip_select = 2, 633 .chip_select = 2,
673 .controller_data = &spi_ad7877_chip_info,
674 }, 634 },
675#endif 635#endif
676#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE) 636#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE)
@@ -681,7 +641,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
681 .max_speed_hz = 5000000, /* max spi clock (SCK) speed in HZ */ 641 .max_speed_hz = 5000000, /* max spi clock (SCK) speed in HZ */
682 .bus_num = 0, 642 .bus_num = 0,
683 .chip_select = 3, 643 .chip_select = 3,
684 .controller_data = &spi_ad7879_chip_info,
685 .mode = SPI_CPHA | SPI_CPOL, 644 .mode = SPI_CPHA | SPI_CPOL,
686 }, 645 },
687#endif 646#endif
@@ -691,7 +650,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
691 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 650 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
692 .bus_num = 0, 651 .bus_num = 0,
693 .chip_select = 1, 652 .chip_select = 1,
694 .controller_data = &spidev_chip_info,
695 }, 653 },
696#endif 654#endif
697#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE) 655#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
@@ -700,7 +658,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
700 .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */ 658 .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
701 .bus_num = 0, 659 .bus_num = 0,
702 .chip_select = 7, 660 .chip_select = 7,
703 .controller_data = &lq035q1_spi_chip_info,
704 .mode = SPI_CPHA | SPI_CPOL, 661 .mode = SPI_CPHA | SPI_CPOL,
705 }, 662 },
706#endif 663#endif
@@ -1276,9 +1233,16 @@ static struct platform_device *stamp_devices[] __initdata = {
1276 &ezkit_flash_device, 1233 &ezkit_flash_device,
1277#endif 1234#endif
1278 1235
1279#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) || \ 1236#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
1280 defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) 1237 &bfin_i2s_pcm,
1281 &bfin_pcm, 1238#endif
1239
1240#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE)
1241 &bfin_tdm_pcm,
1242#endif
1243
1244#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE)
1245 &bfin_ac97_pcm,
1282#endif 1246#endif
1283 1247
1284#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) 1248#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
diff --git a/arch/blackfin/mach-bf527/boards/tll6527m.c b/arch/blackfin/mach-bf527/boards/tll6527m.c
index 18d303dd5627..ec4bc7429c9f 100644
--- a/arch/blackfin/mach-bf527/boards/tll6527m.c
+++ b/arch/blackfin/mach-bf527/boards/tll6527m.c
@@ -314,29 +314,12 @@ static struct flash_platform_data bfin_spi_flash_data = {
314/* SPI flash chip (m25p64) */ 314/* SPI flash chip (m25p64) */
315static struct bfin5xx_spi_chip spi_flash_chip_info = { 315static struct bfin5xx_spi_chip spi_flash_chip_info = {
316 .enable_dma = 0, /* use dma transfer with this chip*/ 316 .enable_dma = 0, /* use dma transfer with this chip*/
317 .bits_per_word = 8,
318};
319#endif
320
321#if defined(CONFIG_BFIN_SPI_ADC) \
322 || defined(CONFIG_BFIN_SPI_ADC_MODULE)
323/* SPI ADC chip */
324static struct bfin5xx_spi_chip spi_adc_chip_info = {
325 .enable_dma = 0, /* use dma transfer with this chip*/
326/*
327 * tll6527m V1.0 does not support native spi slave selects
328 * hence DMA mode will not be useful since the ADC needs
329 * CS to toggle for each sample and cs_change_per_word
330 * seems to be removed from spi_bfin5xx.c
331 */
332 .bits_per_word = 16,
333}; 317};
334#endif 318#endif
335 319
336#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) 320#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
337static struct bfin5xx_spi_chip mmc_spi_chip_info = { 321static struct bfin5xx_spi_chip mmc_spi_chip_info = {
338 .enable_dma = 0, 322 .enable_dma = 0,
339 .bits_per_word = 8,
340}; 323};
341#endif 324#endif
342 325
@@ -359,21 +342,6 @@ static const struct ad7879_platform_data bfin_ad7879_ts_info = {
359}; 342};
360#endif 343#endif
361 344
362#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) \
363 || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE)
364static struct bfin5xx_spi_chip spi_ad7879_chip_info = {
365 .enable_dma = 0,
366 .bits_per_word = 16,
367};
368#endif
369
370#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
371static struct bfin5xx_spi_chip spidev_chip_info = {
372 .enable_dma = 0,
373 .bits_per_word = 8,
374};
375#endif
376
377#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) 345#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
378static struct platform_device bfin_i2s = { 346static struct platform_device bfin_i2s = {
379 .name = "bfin-i2s", 347 .name = "bfin-i2s",
@@ -382,24 +350,7 @@ static struct platform_device bfin_i2s = {
382}; 350};
383#endif 351#endif
384 352
385#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
386static struct bfin5xx_spi_chip lq035q1_spi_chip_info = {
387 .enable_dma = 0,
388 .bits_per_word = 8,
389};
390#endif
391
392#if defined(CONFIG_GPIO_MCP23S08) || defined(CONFIG_GPIO_MCP23S08_MODULE) 353#if defined(CONFIG_GPIO_MCP23S08) || defined(CONFIG_GPIO_MCP23S08_MODULE)
393static struct bfin5xx_spi_chip spi_mcp23s08_sys_chip_info = {
394 .enable_dma = 0,
395 .bits_per_word = 8,
396};
397
398static struct bfin5xx_spi_chip spi_mcp23s08_usr_chip_info = {
399 .enable_dma = 0,
400 .bits_per_word = 8,
401};
402
403#include <linux/spi/mcp23s08.h> 354#include <linux/spi/mcp23s08.h>
404static const struct mcp23s08_platform_data bfin_mcp23s08_sys_gpio_info = { 355static const struct mcp23s08_platform_data bfin_mcp23s08_sys_gpio_info = {
405 .chip[0].is_present = true, 356 .chip[0].is_present = true,
@@ -429,22 +380,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
429 }, 380 },
430#endif 381#endif
431 382
432#if defined(CONFIG_BFIN_SPI_ADC)
433 || defined(CONFIG_BFIN_SPI_ADC_MODULE)
434 {
435 .modalias = "bfin_spi_adc",
436 /* Name of spi_driver for this device */
437 .max_speed_hz = 10000000,
438 /* max spi clock (SCK) speed in HZ */
439 .bus_num = 0, /* Framework bus number */
440 .chip_select = EXP_GPIO_SPISEL_BASE + 0x04 + MAX_CTRL_CS,
441 /* Framework chip select. */
442 .platform_data = NULL, /* No spi_driver specific config */
443 .controller_data = &spi_adc_chip_info,
444 .mode = SPI_MODE_0,
445 },
446#endif
447
448#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) 383#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
449 { 384 {
450 .modalias = "mmc_spi", 385 .modalias = "mmc_spi",
@@ -470,7 +405,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
470 /* max spi clock (SCK) speed in HZ */ 405 /* max spi clock (SCK) speed in HZ */
471 .bus_num = 0, 406 .bus_num = 0,
472 .chip_select = EXP_GPIO_SPISEL_BASE + 0x07 + MAX_CTRL_CS, 407 .chip_select = EXP_GPIO_SPISEL_BASE + 0x07 + MAX_CTRL_CS,
473 .controller_data = &spi_ad7879_chip_info,
474 .mode = SPI_CPHA | SPI_CPOL, 408 .mode = SPI_CPHA | SPI_CPOL,
475 }, 409 },
476#endif 410#endif
@@ -482,7 +416,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
482 .bus_num = 0, 416 .bus_num = 0,
483 .chip_select = EXP_GPIO_SPISEL_BASE + 0x03 + MAX_CTRL_CS, 417 .chip_select = EXP_GPIO_SPISEL_BASE + 0x03 + MAX_CTRL_CS,
484 .mode = SPI_CPHA | SPI_CPOL, 418 .mode = SPI_CPHA | SPI_CPOL,
485 .controller_data = &spidev_chip_info,
486 }, 419 },
487#endif 420#endif
488#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE) 421#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
@@ -491,7 +424,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
491 .max_speed_hz = 20000000, 424 .max_speed_hz = 20000000,
492 .bus_num = 0, 425 .bus_num = 0,
493 .chip_select = EXP_GPIO_SPISEL_BASE + 0x06 + MAX_CTRL_CS, 426 .chip_select = EXP_GPIO_SPISEL_BASE + 0x06 + MAX_CTRL_CS,
494 .controller_data = &lq035q1_spi_chip_info,
495 .mode = SPI_CPHA | SPI_CPOL, 427 .mode = SPI_CPHA | SPI_CPOL,
496 }, 428 },
497#endif 429#endif
@@ -502,7 +434,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
502 .max_speed_hz = 5000000, /* max spi clock (SCK) speed in HZ */ 434 .max_speed_hz = 5000000, /* max spi clock (SCK) speed in HZ */
503 .bus_num = 0, 435 .bus_num = 0,
504 .chip_select = EXP_GPIO_SPISEL_BASE + 0x01 + MAX_CTRL_CS, 436 .chip_select = EXP_GPIO_SPISEL_BASE + 0x01 + MAX_CTRL_CS,
505 .controller_data = &spi_mcp23s08_sys_chip_info,
506 .mode = SPI_CPHA | SPI_CPOL, 437 .mode = SPI_CPHA | SPI_CPOL,
507 }, 438 },
508 { 439 {
@@ -511,7 +442,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
511 .max_speed_hz = 5000000, /* max spi clock (SCK) speed in HZ */ 442 .max_speed_hz = 5000000, /* max spi clock (SCK) speed in HZ */
512 .bus_num = 0, 443 .bus_num = 0,
513 .chip_select = EXP_GPIO_SPISEL_BASE + 0x02 + MAX_CTRL_CS, 444 .chip_select = EXP_GPIO_SPISEL_BASE + 0x02 + MAX_CTRL_CS,
514 .controller_data = &spi_mcp23s08_usr_chip_info,
515 .mode = SPI_CPHA | SPI_CPOL, 445 .mode = SPI_CPHA | SPI_CPOL,
516 }, 446 },
517#endif 447#endif
diff --git a/arch/blackfin/mach-bf527/include/mach/anomaly.h b/arch/blackfin/mach-bf527/include/mach/anomaly.h
index e66a7e89cd3c..688470611e15 100644
--- a/arch/blackfin/mach-bf527/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf527/include/mach/anomaly.h
@@ -11,8 +11,8 @@
11 */ 11 */
12 12
13/* This file should be up to date with: 13/* This file should be up to date with:
14 * - Revision E, 03/15/2010; ADSP-BF526 Blackfin Processor Anomaly List 14 * - Revision F, 05/23/2011; ADSP-BF526 Blackfin Processor Anomaly List
15 * - Revision H, 04/29/2010; ADSP-BF527 Blackfin Processor Anomaly List 15 * - Revision I, 05/23/2011; ADSP-BF527 Blackfin Processor Anomaly List
16 */ 16 */
17 17
18#ifndef _MACH_ANOMALY_H_ 18#ifndef _MACH_ANOMALY_H_
@@ -57,7 +57,7 @@
57/* Incorrect Access of OTP_STATUS During otp_write() Function */ 57/* Incorrect Access of OTP_STATUS During otp_write() Function */
58#define ANOMALY_05000328 (_ANOMALY_BF527(< 2)) 58#define ANOMALY_05000328 (_ANOMALY_BF527(< 2))
59/* Host DMA Boot Modes Are Not Functional */ 59/* Host DMA Boot Modes Are Not Functional */
60#define ANOMALY_05000330 (__SILICON_REVISION__ < 2) 60#define ANOMALY_05000330 (_ANOMALY_BF527(< 2))
61/* Disallowed Configuration Prevents Subsequent Allowed Configuration on Host DMA Port */ 61/* Disallowed Configuration Prevents Subsequent Allowed Configuration on Host DMA Port */
62#define ANOMALY_05000337 (_ANOMALY_BF527(< 2)) 62#define ANOMALY_05000337 (_ANOMALY_BF527(< 2))
63/* Ethernet MAC MDIO Reads Do Not Meet IEEE Specification */ 63/* Ethernet MAC MDIO Reads Do Not Meet IEEE Specification */
@@ -135,7 +135,7 @@
135/* Incorrect Default Internal Voltage Regulator Setting */ 135/* Incorrect Default Internal Voltage Regulator Setting */
136#define ANOMALY_05000410 (_ANOMALY_BF527(< 2)) 136#define ANOMALY_05000410 (_ANOMALY_BF527(< 2))
137/* bfrom_SysControl() Firmware Function Cannot be Used to Enter Power Saving Modes */ 137/* bfrom_SysControl() Firmware Function Cannot be Used to Enter Power Saving Modes */
138#define ANOMALY_05000411 (_ANOMALY_BF526_BF527(< 1, < 2)) 138#define ANOMALY_05000411 (_ANOMALY_BF526(< 1))
139/* OTP_CHECK_FOR_PREV_WRITE Bit is Not Functional in bfrom_OtpWrite() API */ 139/* OTP_CHECK_FOR_PREV_WRITE Bit is Not Functional in bfrom_OtpWrite() API */
140#define ANOMALY_05000414 (_ANOMALY_BF526_BF527(< 1, < 2)) 140#define ANOMALY_05000414 (_ANOMALY_BF526_BF527(< 1, < 2))
141/* DEB2_URGENT Bit Not Functional */ 141/* DEB2_URGENT Bit Not Functional */
@@ -181,11 +181,11 @@
181/* IFLUSH Instruction at End of Hardware Loop Causes Infinite Stall */ 181/* IFLUSH Instruction at End of Hardware Loop Causes Infinite Stall */
182#define ANOMALY_05000443 (1) 182#define ANOMALY_05000443 (1)
183/* The WURESET Bit in the SYSCR Register is not Functional */ 183/* The WURESET Bit in the SYSCR Register is not Functional */
184#define ANOMALY_05000445 (1) 184#define ANOMALY_05000445 (_ANOMALY_BF527(>= 0))
185/* USB DMA Mode 1 Short Packet Data Corruption */ 185/* USB DMA Short Packet Data Corruption */
186#define ANOMALY_05000450 (1) 186#define ANOMALY_05000450 (1)
187/* BCODE_QUICKBOOT, BCODE_ALLBOOT, and BCODE_FULLBOOT Settings in SYSCR Register Not Functional */ 187/* BCODE_QUICKBOOT, BCODE_ALLBOOT, and BCODE_FULLBOOT Settings in SYSCR Register Not Functional */
188#define ANOMALY_05000451 (1) 188#define ANOMALY_05000451 (_ANOMALY_BF527(>= 0))
189/* Incorrect Default Hysteresis Setting for RESET, NMI, and BMODE Signals */ 189/* Incorrect Default Hysteresis Setting for RESET, NMI, and BMODE Signals */
190#define ANOMALY_05000452 (_ANOMALY_BF526_BF527(< 1, >= 0)) 190#define ANOMALY_05000452 (_ANOMALY_BF526_BF527(< 1, >= 0))
191/* USB Receive Interrupt Is Not Generated in DMA Mode 1 */ 191/* USB Receive Interrupt Is Not Generated in DMA Mode 1 */
@@ -198,19 +198,19 @@
198#define ANOMALY_05000461 (1) 198#define ANOMALY_05000461 (1)
199/* Synchronization Problem at Startup May Cause SPORT Transmit Channels to Misalign */ 199/* Synchronization Problem at Startup May Cause SPORT Transmit Channels to Misalign */
200#define ANOMALY_05000462 (1) 200#define ANOMALY_05000462 (1)
201/* USB Rx DMA hang */ 201/* USB Rx DMA Hang */
202#define ANOMALY_05000465 (1) 202#define ANOMALY_05000465 (1)
203/* TxPktRdy Bit Not Set for Transmit Endpoint When Core and DMA Access USB Endpoint FIFOs Simultaneously */ 203/* TxPktRdy Bit Not Set for Transmit Endpoint When Core and DMA Access USB Endpoint FIFOs Simultaneously */
204#define ANOMALY_05000466 (1) 204#define ANOMALY_05000466 (1)
205/* Possible RX data corruption when control & data EP FIFOs are accessed via the core */ 205/* Possible USB RX Data Corruption When Control & Data EP FIFOs are Accessed via the Core */
206#define ANOMALY_05000467 (1) 206#define ANOMALY_05000467 (1)
207/* PLL Latches Incorrect Settings During Reset */ 207/* PLL Latches Incorrect Settings During Reset */
208#define ANOMALY_05000469 (1) 208#define ANOMALY_05000469 (1)
209/* Incorrect Default MSEL Value in PLL_CTL */ 209/* Incorrect Default MSEL Value in PLL_CTL */
210#define ANOMALY_05000472 (_ANOMALY_BF526(>= 0)) 210#define ANOMALY_05000472 (_ANOMALY_BF526(>= 0))
211/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */ 211/* Interrupted SPORT Receive Data Register Read Results In Underflow when SLEN > 15 */
212#define ANOMALY_05000473 (1) 212#define ANOMALY_05000473 (1)
213/* Possible Lockup Condition whem Modifying PLL from External Memory */ 213/* Possible Lockup Condition when Modifying PLL from External Memory */
214#define ANOMALY_05000475 (1) 214#define ANOMALY_05000475 (1)
215/* TESTSET Instruction Cannot Be Interrupted */ 215/* TESTSET Instruction Cannot Be Interrupted */
216#define ANOMALY_05000477 (1) 216#define ANOMALY_05000477 (1)
@@ -219,11 +219,19 @@
219/* Possible USB Data Corruption When Multiple Endpoints Are Accessed by the Core */ 219/* Possible USB Data Corruption When Multiple Endpoints Are Accessed by the Core */
220#define ANOMALY_05000483 (1) 220#define ANOMALY_05000483 (1)
221/* PLL_CTL Change Using bfrom_SysControl() Can Result in Processor Overclocking */ 221/* PLL_CTL Change Using bfrom_SysControl() Can Result in Processor Overclocking */
222#define ANOMALY_05000485 (_ANOMALY_BF526_BF527(< 2, < 3)) 222#define ANOMALY_05000485 (_ANOMALY_BF526_BF527(< 2, >= 0))
223/* The CODEC Zero-Cross Detect Feature is not Functional */ 223/* The CODEC Zero-Cross Detect Feature is not Functional */
224#define ANOMALY_05000487 (1) 224#define ANOMALY_05000487 (1)
225/* IFLUSH sucks at life */ 225/* SPI Master Boot Can Fail Under Certain Conditions */
226#define ANOMALY_05000490 (1)
227/* Instruction Memory Stalls Can Cause IFLUSH to Fail */
226#define ANOMALY_05000491 (1) 228#define ANOMALY_05000491 (1)
229/* EXCPT Instruction May Be Lost If NMI Happens Simultaneously */
230#define ANOMALY_05000494 (1)
231/* CNT_COMMAND Functionality Depends on CNT_IMASK Configuration */
232#define ANOMALY_05000498 (1)
233/* RXS Bit in SPI_STAT May Become Stuck In RX DMA Modes */
234#define ANOMALY_05000501 (1)
227 235
228/* Anomalies that don't exist on this proc */ 236/* Anomalies that don't exist on this proc */
229#define ANOMALY_05000099 (0) 237#define ANOMALY_05000099 (0)
diff --git a/arch/blackfin/mach-bf533/boards/H8606.c b/arch/blackfin/mach-bf533/boards/H8606.c
index d4bfcea56828..eb325ed6607e 100644
--- a/arch/blackfin/mach-bf533/boards/H8606.c
+++ b/arch/blackfin/mach-bf533/boards/H8606.c
@@ -159,22 +159,6 @@ static struct flash_platform_data bfin_spi_flash_data = {
159/* SPI flash chip (m25p64) */ 159/* SPI flash chip (m25p64) */
160static struct bfin5xx_spi_chip spi_flash_chip_info = { 160static struct bfin5xx_spi_chip spi_flash_chip_info = {
161 .enable_dma = 0, /* use dma transfer with this chip*/ 161 .enable_dma = 0, /* use dma transfer with this chip*/
162 .bits_per_word = 8,
163};
164#endif
165
166#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
167/* SPI ADC chip */
168static struct bfin5xx_spi_chip spi_adc_chip_info = {
169 .enable_dma = 1, /* use dma transfer with this chip*/
170 .bits_per_word = 16,
171};
172#endif
173
174#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
175static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
176 .enable_dma = 0,
177 .bits_per_word = 16,
178}; 162};
179#endif 163#endif
180 164
@@ -195,24 +179,12 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
195 }, 179 },
196#endif 180#endif
197 181
198#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
199 {
200 .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
201 .max_speed_hz = 4, /* actual baudrate is SCLK/(2xspeed_hz) */
202 .bus_num = 1, /* Framework bus number */
203 .chip_select = 1, /* Framework chip select. */
204 .platform_data = NULL, /* No spi_driver specific config */
205 .controller_data = &spi_adc_chip_info,
206 },
207#endif
208
209#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE) 182#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
210 { 183 {
211 .modalias = "ad183x", 184 .modalias = "ad183x",
212 .max_speed_hz = 16, 185 .max_speed_hz = 16,
213 .bus_num = 1, 186 .bus_num = 1,
214 .chip_select = 4, 187 .chip_select = 4,
215 .controller_data = &ad1836_spi_chip_info,
216 }, 188 },
217#endif 189#endif
218 190
diff --git a/arch/blackfin/mach-bf533/boards/blackstamp.c b/arch/blackfin/mach-bf533/boards/blackstamp.c
index 87b5af3693c1..b0ec825fb4ec 100644
--- a/arch/blackfin/mach-bf533/boards/blackstamp.c
+++ b/arch/blackfin/mach-bf533/boards/blackstamp.c
@@ -102,21 +102,12 @@ static struct flash_platform_data bfin_spi_flash_data = {
102/* SPI flash chip (m25p64) */ 102/* SPI flash chip (m25p64) */
103static struct bfin5xx_spi_chip spi_flash_chip_info = { 103static struct bfin5xx_spi_chip spi_flash_chip_info = {
104 .enable_dma = 0, /* use dma transfer with this chip*/ 104 .enable_dma = 0, /* use dma transfer with this chip*/
105 .bits_per_word = 8,
106}; 105};
107#endif 106#endif
108 107
109#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) 108#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
110static struct bfin5xx_spi_chip mmc_spi_chip_info = { 109static struct bfin5xx_spi_chip mmc_spi_chip_info = {
111 .enable_dma = 0, 110 .enable_dma = 0,
112 .bits_per_word = 8,
113};
114#endif
115
116#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
117static struct bfin5xx_spi_chip spidev_chip_info = {
118 .enable_dma = 0,
119 .bits_per_word = 8,
120}; 111};
121#endif 112#endif
122 113
@@ -151,7 +142,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
151 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 142 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
152 .bus_num = 0, 143 .bus_num = 0,
153 .chip_select = 7, 144 .chip_select = 7,
154 .controller_data = &spidev_chip_info,
155 }, 145 },
156#endif 146#endif
157}; 147};
diff --git a/arch/blackfin/mach-bf533/boards/cm_bf533.c b/arch/blackfin/mach-bf533/boards/cm_bf533.c
index 4d5604eaa7c2..14f54a31e74c 100644
--- a/arch/blackfin/mach-bf533/boards/cm_bf533.c
+++ b/arch/blackfin/mach-bf533/boards/cm_bf533.c
@@ -59,29 +59,12 @@ static struct flash_platform_data bfin_spi_flash_data = {
59/* SPI flash chip (m25p64) */ 59/* SPI flash chip (m25p64) */
60static struct bfin5xx_spi_chip spi_flash_chip_info = { 60static struct bfin5xx_spi_chip spi_flash_chip_info = {
61 .enable_dma = 0, /* use dma transfer with this chip*/ 61 .enable_dma = 0, /* use dma transfer with this chip*/
62 .bits_per_word = 8,
63};
64#endif
65
66/* SPI ADC chip */
67#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
68static struct bfin5xx_spi_chip spi_adc_chip_info = {
69 .enable_dma = 1, /* use dma transfer with this chip*/
70 .bits_per_word = 16,
71};
72#endif
73
74#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
75static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
76 .enable_dma = 0,
77 .bits_per_word = 16,
78}; 62};
79#endif 63#endif
80 64
81#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) 65#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
82static struct bfin5xx_spi_chip mmc_spi_chip_info = { 66static struct bfin5xx_spi_chip mmc_spi_chip_info = {
83 .enable_dma = 0, 67 .enable_dma = 0,
84 .bits_per_word = 8,
85}; 68};
86#endif 69#endif
87 70
@@ -99,24 +82,12 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
99 }, 82 },
100#endif 83#endif
101 84
102#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
103 {
104 .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
105 .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
106 .bus_num = 0, /* Framework bus number */
107 .chip_select = 2, /* Framework chip select. */
108 .platform_data = NULL, /* No spi_driver specific config */
109 .controller_data = &spi_adc_chip_info,
110 },
111#endif
112
113#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE) 85#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
114 { 86 {
115 .modalias = "ad183x", 87 .modalias = "ad183x",
116 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 88 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
117 .bus_num = 0, 89 .bus_num = 0,
118 .chip_select = 4, 90 .chip_select = 4,
119 .controller_data = &ad1836_spi_chip_info,
120 }, 91 },
121#endif 92#endif
122 93
diff --git a/arch/blackfin/mach-bf533/boards/ezkit.c b/arch/blackfin/mach-bf533/boards/ezkit.c
index b67b91d82242..ecd2801f050d 100644
--- a/arch/blackfin/mach-bf533/boards/ezkit.c
+++ b/arch/blackfin/mach-bf533/boards/ezkit.c
@@ -210,29 +210,6 @@ static struct flash_platform_data bfin_spi_flash_data = {
210/* SPI flash chip (m25p64) */ 210/* SPI flash chip (m25p64) */
211static struct bfin5xx_spi_chip spi_flash_chip_info = { 211static struct bfin5xx_spi_chip spi_flash_chip_info = {
212 .enable_dma = 0, /* use dma transfer with this chip*/ 212 .enable_dma = 0, /* use dma transfer with this chip*/
213 .bits_per_word = 8,
214};
215#endif
216
217#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
218/* SPI ADC chip */
219static struct bfin5xx_spi_chip spi_adc_chip_info = {
220 .enable_dma = 1, /* use dma transfer with this chip*/
221 .bits_per_word = 16,
222};
223#endif
224
225#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
226static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
227 .enable_dma = 0,
228 .bits_per_word = 16,
229};
230#endif
231
232#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
233static struct bfin5xx_spi_chip spidev_chip_info = {
234 .enable_dma = 0,
235 .bits_per_word = 8,
236}; 213};
237#endif 214#endif
238 215
@@ -250,24 +227,12 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
250 }, 227 },
251#endif 228#endif
252 229
253#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
254 {
255 .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
256 .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
257 .bus_num = 0, /* Framework bus number */
258 .chip_select = 1, /* Framework chip select. */
259 .platform_data = NULL, /* No spi_driver specific config */
260 .controller_data = &spi_adc_chip_info,
261 },
262#endif
263
264#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE) 230#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
265 { 231 {
266 .modalias = "ad183x", 232 .modalias = "ad183x",
267 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 233 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
268 .bus_num = 0, 234 .bus_num = 0,
269 .chip_select = 4, 235 .chip_select = 4,
270 .controller_data = &ad1836_spi_chip_info,
271 }, 236 },
272#endif 237#endif
273#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE) 238#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
@@ -276,7 +241,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
276 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 241 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
277 .bus_num = 0, 242 .bus_num = 0,
278 .chip_select = 1, 243 .chip_select = 1,
279 .controller_data = &spidev_chip_info,
280 }, 244 },
281#endif 245#endif
282}; 246};
diff --git a/arch/blackfin/mach-bf533/boards/ip0x.c b/arch/blackfin/mach-bf533/boards/ip0x.c
index a377d8afea03..fbee77fa9211 100644
--- a/arch/blackfin/mach-bf533/boards/ip0x.c
+++ b/arch/blackfin/mach-bf533/boards/ip0x.c
@@ -110,7 +110,6 @@ static struct platform_device dm9000_device2 = {
110#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) 110#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
111static struct bfin5xx_spi_chip mmc_spi_chip_info = { 111static struct bfin5xx_spi_chip mmc_spi_chip_info = {
112 .enable_dma = 0, /* if 1 - block!!! */ 112 .enable_dma = 0, /* if 1 - block!!! */
113 .bits_per_word = 8,
114}; 113};
115#endif 114#endif
116 115
diff --git a/arch/blackfin/mach-bf533/boards/stamp.c b/arch/blackfin/mach-bf533/boards/stamp.c
index 43224ef00b8c..964a8e5f79b4 100644
--- a/arch/blackfin/mach-bf533/boards/stamp.c
+++ b/arch/blackfin/mach-bf533/boards/stamp.c
@@ -80,6 +80,9 @@ static struct resource net2272_bfin_resources[] = {
80 .end = 0x20300000 + 0x100, 80 .end = 0x20300000 + 0x100,
81 .flags = IORESOURCE_MEM, 81 .flags = IORESOURCE_MEM,
82 }, { 82 }, {
83 .start = 1,
84 .flags = IORESOURCE_BUS,
85 }, {
83 .start = IRQ_PF10, 86 .start = IRQ_PF10,
84 .end = IRQ_PF10, 87 .end = IRQ_PF10,
85 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, 88 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
@@ -172,29 +175,6 @@ static struct flash_platform_data bfin_spi_flash_data = {
172/* SPI flash chip (m25p64) */ 175/* SPI flash chip (m25p64) */
173static struct bfin5xx_spi_chip spi_flash_chip_info = { 176static struct bfin5xx_spi_chip spi_flash_chip_info = {
174 .enable_dma = 0, /* use dma transfer with this chip*/ 177 .enable_dma = 0, /* use dma transfer with this chip*/
175 .bits_per_word = 8,
176};
177#endif
178
179#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
180/* SPI ADC chip */
181static struct bfin5xx_spi_chip spi_adc_chip_info = {
182 .enable_dma = 1, /* use dma transfer with this chip*/
183 .bits_per_word = 16,
184};
185#endif
186
187#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
188static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
189 .enable_dma = 0,
190 .bits_per_word = 16,
191};
192#endif
193
194#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
195static struct bfin5xx_spi_chip spidev_chip_info = {
196 .enable_dma = 0,
197 .bits_per_word = 8,
198}; 178};
199#endif 179#endif
200 180
@@ -221,7 +201,6 @@ static struct mmc_spi_platform_data bfin_mmc_spi_pdata = {
221 201
222static struct bfin5xx_spi_chip mmc_spi_chip_info = { 202static struct bfin5xx_spi_chip mmc_spi_chip_info = {
223 .enable_dma = 0, 203 .enable_dma = 0,
224 .bits_per_word = 8,
225 .pio_interrupt = 0, 204 .pio_interrupt = 0,
226}; 205};
227#endif 206#endif
@@ -240,17 +219,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
240 }, 219 },
241#endif 220#endif
242 221
243#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
244 {
245 .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
246 .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
247 .bus_num = 0, /* Framework bus number */
248 .chip_select = 1, /* Framework chip select. */
249 .platform_data = NULL, /* No spi_driver specific config */
250 .controller_data = &spi_adc_chip_info,
251 },
252#endif
253
254#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE) 222#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
255 { 223 {
256 .modalias = "ad183x", 224 .modalias = "ad183x",
@@ -258,7 +226,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
258 .bus_num = 0, 226 .bus_num = 0,
259 .chip_select = 4, 227 .chip_select = 4,
260 .platform_data = "ad1836", /* only includes chip name for the moment */ 228 .platform_data = "ad1836", /* only includes chip name for the moment */
261 .controller_data = &ad1836_spi_chip_info,
262 .mode = SPI_MODE_3, 229 .mode = SPI_MODE_3,
263 }, 230 },
264#endif 231#endif
@@ -269,7 +236,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
269 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 236 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
270 .bus_num = 0, 237 .bus_num = 0,
271 .chip_select = 1, 238 .chip_select = 1,
272 .controller_data = &spidev_chip_info,
273 }, 239 },
274#endif 240#endif
275#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) 241#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
@@ -659,6 +625,41 @@ static struct platform_device *stamp_devices[] __initdata = {
659#endif 625#endif
660}; 626};
661 627
628static int __init net2272_init(void)
629{
630#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
631 int ret;
632
633 /* Set PF0 to 0, PF1 to 1 make /AMS3 work properly */
634 ret = gpio_request(GPIO_PF0, "net2272");
635 if (ret)
636 return ret;
637
638 ret = gpio_request(GPIO_PF1, "net2272");
639 if (ret) {
640 gpio_free(GPIO_PF0);
641 return ret;
642 }
643
644 ret = gpio_request(GPIO_PF11, "net2272");
645 if (ret) {
646 gpio_free(GPIO_PF0);
647 gpio_free(GPIO_PF1);
648 return ret;
649 }
650
651 gpio_direction_output(GPIO_PF0, 0);
652 gpio_direction_output(GPIO_PF1, 1);
653
654 /* Reset the USB chip */
655 gpio_direction_output(GPIO_PF11, 0);
656 mdelay(2);
657 gpio_set_value(GPIO_PF11, 1);
658#endif
659
660 return 0;
661}
662
662static int __init stamp_init(void) 663static int __init stamp_init(void)
663{ 664{
664 int ret; 665 int ret;
@@ -685,6 +686,9 @@ static int __init stamp_init(void)
685 } 686 }
686#endif 687#endif
687 688
689 if (net2272_init())
690 pr_warning("unable to configure net2272; it probably won't work\n");
691
688 spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info)); 692 spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
689 return 0; 693 return 0;
690} 694}
diff --git a/arch/blackfin/mach-bf533/include/mach/anomaly.h b/arch/blackfin/mach-bf533/include/mach/anomaly.h
index 72aa59440f82..03f2b40912a3 100644
--- a/arch/blackfin/mach-bf533/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf533/include/mach/anomaly.h
@@ -11,7 +11,7 @@
11 */ 11 */
12 12
13/* This file should be up to date with: 13/* This file should be up to date with:
14 * - Revision F, 05/25/2010; ADSP-BF531/BF532/BF533 Blackfin Processor Anomaly List 14 * - Revision G, 05/23/2011; ADSP-BF531/BF532/BF533 Blackfin Processor Anomaly List
15 */ 15 */
16 16
17#ifndef _MACH_ANOMALY_H_ 17#ifndef _MACH_ANOMALY_H_
@@ -152,7 +152,7 @@
152#define ANOMALY_05000277 (__SILICON_REVISION__ < 6) 152#define ANOMALY_05000277 (__SILICON_REVISION__ < 6)
153/* Disabling Peripherals with DMA Running May Cause DMA System Instability */ 153/* Disabling Peripherals with DMA Running May Cause DMA System Instability */
154#define ANOMALY_05000278 (__SILICON_REVISION__ < 6) 154#define ANOMALY_05000278 (__SILICON_REVISION__ < 6)
155/* False Hardware Error Exception when ISR Context Is Not Restored */ 155/* False Hardware Error when ISR Context Is Not Restored */
156#define ANOMALY_05000281 (__SILICON_REVISION__ < 6) 156#define ANOMALY_05000281 (__SILICON_REVISION__ < 6)
157/* Memory DMA Corruption with 32-Bit Data and Traffic Control */ 157/* Memory DMA Corruption with 32-Bit Data and Traffic Control */
158#define ANOMALY_05000282 (__SILICON_REVISION__ < 6) 158#define ANOMALY_05000282 (__SILICON_REVISION__ < 6)
@@ -210,18 +210,25 @@
210#define ANOMALY_05000462 (1) 210#define ANOMALY_05000462 (1)
211/* Boot Failure When SDRAM Control Signals Toggle Coming Out Of Reset */ 211/* Boot Failure When SDRAM Control Signals Toggle Coming Out Of Reset */
212#define ANOMALY_05000471 (1) 212#define ANOMALY_05000471 (1)
213/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */ 213/* Interrupted SPORT Receive Data Register Read Results In Underflow when SLEN > 15 */
214#define ANOMALY_05000473 (1) 214#define ANOMALY_05000473 (1)
215/* Possible Lockup Condition whem Modifying PLL from External Memory */ 215/* Possible Lockup Condition when Modifying PLL from External Memory */
216#define ANOMALY_05000475 (1) 216#define ANOMALY_05000475 (1)
217/* TESTSET Instruction Cannot Be Interrupted */ 217/* TESTSET Instruction Cannot Be Interrupted */
218#define ANOMALY_05000477 (1) 218#define ANOMALY_05000477 (1)
219/* Reads of ITEST_COMMAND and ITEST_DATA Registers Cause Cache Corruption */ 219/* Reads of ITEST_COMMAND and ITEST_DATA Registers Cause Cache Corruption */
220#define ANOMALY_05000481 (1) 220#define ANOMALY_05000481 (1)
221/* IFLUSH sucks at life */ 221/* PLL May Latch Incorrect Values Coming Out of Reset */
222#define ANOMALY_05000489 (1)
223/* Instruction Memory Stalls Can Cause IFLUSH to Fail */
222#define ANOMALY_05000491 (1) 224#define ANOMALY_05000491 (1)
225/* EXCPT Instruction May Be Lost If NMI Happens Simultaneously */
226#define ANOMALY_05000494 (1)
227/* RXS Bit in SPI_STAT May Become Stuck In RX DMA Modes */
228#define ANOMALY_05000501 (1)
223 229
224/* These anomalies have been "phased" out of analog.com anomaly sheets and are 230/*
231 * These anomalies have been "phased" out of analog.com anomaly sheets and are
225 * here to show running on older silicon just isn't feasible. 232 * here to show running on older silicon just isn't feasible.
226 */ 233 */
227 234
diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537e.c b/arch/blackfin/mach-bf537/boards/cm_bf537e.c
index d582b810e7a7..44fd8409db10 100644
--- a/arch/blackfin/mach-bf537/boards/cm_bf537e.c
+++ b/arch/blackfin/mach-bf537/boards/cm_bf537e.c
@@ -61,29 +61,12 @@ static struct flash_platform_data bfin_spi_flash_data = {
61/* SPI flash chip (m25p64) */ 61/* SPI flash chip (m25p64) */
62static struct bfin5xx_spi_chip spi_flash_chip_info = { 62static struct bfin5xx_spi_chip spi_flash_chip_info = {
63 .enable_dma = 0, /* use dma transfer with this chip*/ 63 .enable_dma = 0, /* use dma transfer with this chip*/
64 .bits_per_word = 8,
65};
66#endif
67
68#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
69/* SPI ADC chip */
70static struct bfin5xx_spi_chip spi_adc_chip_info = {
71 .enable_dma = 1, /* use dma transfer with this chip*/
72 .bits_per_word = 16,
73};
74#endif
75
76#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
77static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
78 .enable_dma = 0,
79 .bits_per_word = 16,
80}; 64};
81#endif 65#endif
82 66
83#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) 67#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
84static struct bfin5xx_spi_chip mmc_spi_chip_info = { 68static struct bfin5xx_spi_chip mmc_spi_chip_info = {
85 .enable_dma = 0, 69 .enable_dma = 0,
86 .bits_per_word = 8,
87}; 70};
88#endif 71#endif
89 72
@@ -101,24 +84,12 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
101 }, 84 },
102#endif 85#endif
103 86
104#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
105 {
106 .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
107 .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
108 .bus_num = 0, /* Framework bus number */
109 .chip_select = 1, /* Framework chip select. */
110 .platform_data = NULL, /* No spi_driver specific config */
111 .controller_data = &spi_adc_chip_info,
112 },
113#endif
114
115#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE) 87#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
116 { 88 {
117 .modalias = "ad183x", 89 .modalias = "ad183x",
118 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 90 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
119 .bus_num = 0, 91 .bus_num = 0,
120 .chip_select = 4, 92 .chip_select = 4,
121 .controller_data = &ad1836_spi_chip_info,
122 }, 93 },
123#endif 94#endif
124 95
@@ -766,6 +737,24 @@ static struct platform_device *cm_bf537e_devices[] __initdata = {
766#endif 737#endif
767}; 738};
768 739
740static int __init net2272_init(void)
741{
742#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
743 int ret;
744
745 ret = gpio_request(GPIO_PG14, "net2272");
746 if (ret)
747 return ret;
748
749 /* Reset USB Chip, PG14 */
750 gpio_direction_output(GPIO_PG14, 0);
751 mdelay(2);
752 gpio_set_value(GPIO_PG14, 1);
753#endif
754
755 return 0;
756}
757
769static int __init cm_bf537e_init(void) 758static int __init cm_bf537e_init(void)
770{ 759{
771 printk(KERN_INFO "%s(): registering device resources\n", __func__); 760 printk(KERN_INFO "%s(): registering device resources\n", __func__);
@@ -777,6 +766,10 @@ static int __init cm_bf537e_init(void)
777#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE) 766#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
778 irq_set_status_flags(PATA_INT, IRQ_NOAUTOEN); 767 irq_set_status_flags(PATA_INT, IRQ_NOAUTOEN);
779#endif 768#endif
769
770 if (net2272_init())
771 pr_warning("unable to configure net2272; it probably won't work\n");
772
780 return 0; 773 return 0;
781} 774}
782 775
diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537u.c b/arch/blackfin/mach-bf537/boards/cm_bf537u.c
index cbb8098604c5..1b4ac5c64aae 100644
--- a/arch/blackfin/mach-bf537/boards/cm_bf537u.c
+++ b/arch/blackfin/mach-bf537/boards/cm_bf537u.c
@@ -62,29 +62,12 @@ static struct flash_platform_data bfin_spi_flash_data = {
62/* SPI flash chip (m25p64) */ 62/* SPI flash chip (m25p64) */
63static struct bfin5xx_spi_chip spi_flash_chip_info = { 63static struct bfin5xx_spi_chip spi_flash_chip_info = {
64 .enable_dma = 0, /* use dma transfer with this chip*/ 64 .enable_dma = 0, /* use dma transfer with this chip*/
65 .bits_per_word = 8,
66};
67#endif
68
69#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
70/* SPI ADC chip */
71static struct bfin5xx_spi_chip spi_adc_chip_info = {
72 .enable_dma = 1, /* use dma transfer with this chip*/
73 .bits_per_word = 16,
74};
75#endif
76
77#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
78static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
79 .enable_dma = 0,
80 .bits_per_word = 16,
81}; 65};
82#endif 66#endif
83 67
84#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) 68#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
85static struct bfin5xx_spi_chip mmc_spi_chip_info = { 69static struct bfin5xx_spi_chip mmc_spi_chip_info = {
86 .enable_dma = 0, 70 .enable_dma = 0,
87 .bits_per_word = 8,
88}; 71};
89#endif 72#endif
90 73
@@ -102,24 +85,12 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
102 }, 85 },
103#endif 86#endif
104 87
105#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
106 {
107 .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
108 .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
109 .bus_num = 0, /* Framework bus number */
110 .chip_select = 1, /* Framework chip select. */
111 .platform_data = NULL, /* No spi_driver specific config */
112 .controller_data = &spi_adc_chip_info,
113 },
114#endif
115
116#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE) 88#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
117 { 89 {
118 .modalias = "ad183x", 90 .modalias = "ad183x",
119 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 91 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
120 .bus_num = 0, 92 .bus_num = 0,
121 .chip_select = 4, 93 .chip_select = 4,
122 .controller_data = &ad1836_spi_chip_info,
123 }, 94 },
124#endif 95#endif
125 96
@@ -731,6 +702,36 @@ static struct platform_device *cm_bf537u_devices[] __initdata = {
731#endif 702#endif
732}; 703};
733 704
705static int __init net2272_init(void)
706{
707#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
708 int ret;
709
710 ret = gpio_request(GPIO_PH15, driver_name);
711 if (ret)
712 return ret;
713
714 ret = gpio_request(GPIO_PH13, "net2272");
715 if (ret) {
716 gpio_free(GPIO_PH15);
717 return ret;
718 }
719
720 /* Set PH15 Low make /AMS2 work properly */
721 gpio_direction_output(GPIO_PH15, 0);
722
723 /* enable CLKBUF output */
724 bfin_write_VR_CTL(bfin_read_VR_CTL() | CLKBUFOE);
725
726 /* Reset the USB chip */
727 gpio_direction_output(GPIO_PH13, 0);
728 mdelay(2);
729 gpio_set_value(GPIO_PH13, 1);
730#endif
731
732 return 0;
733}
734
734static int __init cm_bf537u_init(void) 735static int __init cm_bf537u_init(void)
735{ 736{
736 printk(KERN_INFO "%s(): registering device resources\n", __func__); 737 printk(KERN_INFO "%s(): registering device resources\n", __func__);
@@ -742,6 +743,10 @@ static int __init cm_bf537u_init(void)
742#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE) 743#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
743 irq_set_status_flags(PATA_INT, IRQ_NOAUTOEN); 744 irq_set_status_flags(PATA_INT, IRQ_NOAUTOEN);
744#endif 745#endif
746
747 if (net2272_init())
748 pr_warning("unable to configure net2272; it probably won't work\n");
749
745 return 0; 750 return 0;
746} 751}
747 752
diff --git a/arch/blackfin/mach-bf537/boards/dnp5370.c b/arch/blackfin/mach-bf537/boards/dnp5370.c
index 6b4ff4605bff..8bc951de979d 100644
--- a/arch/blackfin/mach-bf537/boards/dnp5370.c
+++ b/arch/blackfin/mach-bf537/boards/dnp5370.c
@@ -130,7 +130,6 @@ static struct platform_device asmb_flash_device = {
130 130
131static struct bfin5xx_spi_chip mmc_spi_chip_info = { 131static struct bfin5xx_spi_chip mmc_spi_chip_info = {
132 .enable_dma = 0, /* use no dma transfer with this chip*/ 132 .enable_dma = 0, /* use no dma transfer with this chip*/
133 .bits_per_word = 8,
134}; 133};
135 134
136#endif 135#endif
@@ -161,7 +160,6 @@ static struct flash_platform_data bfin_spi_dataflash_data = {
161 160
162static struct bfin5xx_spi_chip spi_dataflash_chip_info = { 161static struct bfin5xx_spi_chip spi_dataflash_chip_info = {
163 .enable_dma = 0, /* use no dma transfer with this chip*/ 162 .enable_dma = 0, /* use no dma transfer with this chip*/
164 .bits_per_word = 8,
165}; 163};
166#endif 164#endif
167 165
diff --git a/arch/blackfin/mach-bf537/boards/minotaur.c b/arch/blackfin/mach-bf537/boards/minotaur.c
index bfb3671a78da..c62f9dccd9f7 100644
--- a/arch/blackfin/mach-bf537/boards/minotaur.c
+++ b/arch/blackfin/mach-bf537/boards/minotaur.c
@@ -159,14 +159,12 @@ static struct flash_platform_data bfin_spi_flash_data = {
159/* SPI flash chip (m25p64) */ 159/* SPI flash chip (m25p64) */
160static struct bfin5xx_spi_chip spi_flash_chip_info = { 160static struct bfin5xx_spi_chip spi_flash_chip_info = {
161 .enable_dma = 0, /* use dma transfer with this chip*/ 161 .enable_dma = 0, /* use dma transfer with this chip*/
162 .bits_per_word = 8,
163}; 162};
164#endif 163#endif
165 164
166#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) 165#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
167static struct bfin5xx_spi_chip mmc_spi_chip_info = { 166static struct bfin5xx_spi_chip mmc_spi_chip_info = {
168 .enable_dma = 0, 167 .enable_dma = 0,
169 .bits_per_word = 8,
170}; 168};
171#endif 169#endif
172 170
diff --git a/arch/blackfin/mach-bf537/boards/pnav10.c b/arch/blackfin/mach-bf537/boards/pnav10.c
index 9389f03e3b0a..3b8151d99b9a 100644
--- a/arch/blackfin/mach-bf537/boards/pnav10.c
+++ b/arch/blackfin/mach-bf537/boards/pnav10.c
@@ -184,40 +184,16 @@ static struct flash_platform_data bfin_spi_flash_data = {
184/* SPI flash chip (m25p64) */ 184/* SPI flash chip (m25p64) */
185static struct bfin5xx_spi_chip spi_flash_chip_info = { 185static struct bfin5xx_spi_chip spi_flash_chip_info = {
186 .enable_dma = 0, /* use dma transfer with this chip*/ 186 .enable_dma = 0, /* use dma transfer with this chip*/
187 .bits_per_word = 8,
188};
189#endif
190
191#if defined(CONFIG_BFIN_SPI_ADC) \
192 || defined(CONFIG_BFIN_SPI_ADC_MODULE)
193/* SPI ADC chip */
194static struct bfin5xx_spi_chip spi_adc_chip_info = {
195 .enable_dma = 1, /* use dma transfer with this chip*/
196 .bits_per_word = 16,
197};
198#endif
199
200#if defined(CONFIG_SND_BF5XX_SOC_AD183X) \
201 || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
202static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
203 .enable_dma = 0,
204 .bits_per_word = 16,
205}; 187};
206#endif 188#endif
207 189
208#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) 190#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
209static struct bfin5xx_spi_chip mmc_spi_chip_info = { 191static struct bfin5xx_spi_chip mmc_spi_chip_info = {
210 .enable_dma = 0, 192 .enable_dma = 0,
211 .bits_per_word = 8,
212}; 193};
213#endif 194#endif
214 195
215#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE) 196#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
216static struct bfin5xx_spi_chip spi_ad7877_chip_info = {
217 .enable_dma = 0,
218 .bits_per_word = 16,
219};
220
221static const struct ad7877_platform_data bfin_ad7877_ts_info = { 197static const struct ad7877_platform_data bfin_ad7877_ts_info = {
222 .model = 7877, 198 .model = 7877,
223 .vref_delay_usecs = 50, /* internal, no capacitor */ 199 .vref_delay_usecs = 50, /* internal, no capacitor */
@@ -248,18 +224,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
248 }, 224 },
249#endif 225#endif
250 226
251#if defined(CONFIG_BFIN_SPI_ADC) \
252 || defined(CONFIG_BFIN_SPI_ADC_MODULE)
253 {
254 .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
255 .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
256 .bus_num = 0, /* Framework bus number */
257 .chip_select = 1, /* Framework chip select. */
258 .platform_data = NULL, /* No spi_driver specific config */
259 .controller_data = &spi_adc_chip_info,
260 },
261#endif
262
263#if defined(CONFIG_SND_BF5XX_SOC_AD183X) \ 227#if defined(CONFIG_SND_BF5XX_SOC_AD183X) \
264 || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE) 228 || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
265 { 229 {
@@ -267,7 +231,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
267 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 231 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
268 .bus_num = 0, 232 .bus_num = 0,
269 .chip_select = 4, 233 .chip_select = 4,
270 .controller_data = &ad1836_spi_chip_info,
271 }, 234 },
272#endif 235#endif
273#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) 236#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
@@ -288,7 +251,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
288 .max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */ 251 .max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */
289 .bus_num = 0, 252 .bus_num = 0,
290 .chip_select = 5, 253 .chip_select = 5,
291 .controller_data = &spi_ad7877_chip_info,
292}, 254},
293#endif 255#endif
294 256
diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c
index 76db1d483173..b52e6728f64f 100644
--- a/arch/blackfin/mach-bf537/boards/stamp.c
+++ b/arch/blackfin/mach-bf537/boards/stamp.c
@@ -367,6 +367,9 @@ static struct resource net2272_bfin_resources[] = {
367 .end = 0x20300000 + 0x100, 367 .end = 0x20300000 + 0x100,
368 .flags = IORESOURCE_MEM, 368 .flags = IORESOURCE_MEM,
369 }, { 369 }, {
370 .start = 1,
371 .flags = IORESOURCE_BUS,
372 }, {
370 .start = IRQ_PF7, 373 .start = IRQ_PF7,
371 .end = IRQ_PF7, 374 .end = IRQ_PF7,
372 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, 375 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
@@ -533,49 +536,11 @@ static struct flash_platform_data bfin_spi_flash_data = {
533/* SPI flash chip (m25p64) */ 536/* SPI flash chip (m25p64) */
534static struct bfin5xx_spi_chip spi_flash_chip_info = { 537static struct bfin5xx_spi_chip spi_flash_chip_info = {
535 .enable_dma = 0, /* use dma transfer with this chip*/ 538 .enable_dma = 0, /* use dma transfer with this chip*/
536 .bits_per_word = 8,
537};
538#endif
539
540#if defined(CONFIG_BFIN_SPI_ADC) \
541 || defined(CONFIG_BFIN_SPI_ADC_MODULE)
542/* SPI ADC chip */
543static struct bfin5xx_spi_chip spi_adc_chip_info = {
544 .enable_dma = 1, /* use dma transfer with this chip*/
545 .bits_per_word = 16,
546};
547#endif
548
549#if defined(CONFIG_SND_BF5XX_SOC_AD183X) \
550 || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
551static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
552 .enable_dma = 0,
553 .bits_per_word = 16,
554};
555#endif
556
557#if defined(CONFIG_SND_BF5XX_SOC_AD193X) \
558 || defined(CONFIG_SND_BF5XX_SOC_AD193X_MODULE)
559static struct bfin5xx_spi_chip ad1938_spi_chip_info = {
560 .enable_dma = 0,
561 .bits_per_word = 8,
562};
563#endif
564
565#if defined(CONFIG_SND_BF5XX_SOC_ADAV80X) \
566 || defined(CONFIG_SND_BF5XX_SOC_ADAV80X_MODULE)
567static struct bfin5xx_spi_chip adav801_spi_chip_info = {
568 .enable_dma = 0,
569 .bits_per_word = 8,
570}; 539};
571#endif 540#endif
572 541
573#if defined(CONFIG_INPUT_AD714X_SPI) || defined(CONFIG_INPUT_AD714X_SPI_MODULE) 542#if defined(CONFIG_INPUT_AD714X_SPI) || defined(CONFIG_INPUT_AD714X_SPI_MODULE)
574#include <linux/input/ad714x.h> 543#include <linux/input/ad714x.h>
575static struct bfin5xx_spi_chip ad7147_spi_chip_info = {
576 .enable_dma = 0,
577 .bits_per_word = 16,
578};
579 544
580static struct ad714x_slider_plat ad7147_spi_slider_plat[] = { 545static struct ad714x_slider_plat ad7147_spi_slider_plat[] = {
581 { 546 {
@@ -685,7 +650,6 @@ static struct ad714x_platform_data ad7142_i2c_platform_data = {
685#if defined(CONFIG_AD2S90) || defined(CONFIG_AD2S90_MODULE) 650#if defined(CONFIG_AD2S90) || defined(CONFIG_AD2S90_MODULE)
686static struct bfin5xx_spi_chip ad2s90_spi_chip_info = { 651static struct bfin5xx_spi_chip ad2s90_spi_chip_info = {
687 .enable_dma = 0, 652 .enable_dma = 0,
688 .bits_per_word = 16,
689}; 653};
690#endif 654#endif
691 655
@@ -697,7 +661,6 @@ static unsigned short ad2s120x_platform_data[] = {
697 661
698static struct bfin5xx_spi_chip ad2s120x_spi_chip_info = { 662static struct bfin5xx_spi_chip ad2s120x_spi_chip_info = {
699 .enable_dma = 0, 663 .enable_dma = 0,
700 .bits_per_word = 16,
701}; 664};
702#endif 665#endif
703 666
@@ -714,14 +677,12 @@ static unsigned short ad2s1210_platform_data[] = {
714 677
715static struct bfin5xx_spi_chip ad2s1210_spi_chip_info = { 678static struct bfin5xx_spi_chip ad2s1210_spi_chip_info = {
716 .enable_dma = 0, 679 .enable_dma = 0,
717 .bits_per_word = 8,
718}; 680};
719#endif 681#endif
720 682
721#if defined(CONFIG_AD7314) || defined(CONFIG_AD7314_MODULE) 683#if defined(CONFIG_AD7314) || defined(CONFIG_AD7314_MODULE)
722static struct bfin5xx_spi_chip ad7314_spi_chip_info = { 684static struct bfin5xx_spi_chip ad7314_spi_chip_info = {
723 .enable_dma = 0, 685 .enable_dma = 0,
724 .bits_per_word = 16,
725}; 686};
726#endif 687#endif
727 688
@@ -735,7 +696,6 @@ static unsigned short ad7816_platform_data[] = {
735 696
736static struct bfin5xx_spi_chip ad7816_spi_chip_info = { 697static struct bfin5xx_spi_chip ad7816_spi_chip_info = {
737 .enable_dma = 0, 698 .enable_dma = 0,
738 .bits_per_word = 8,
739}; 699};
740#endif 700#endif
741 701
@@ -749,7 +709,6 @@ static unsigned long adt7310_platform_data[3] = {
749 709
750static struct bfin5xx_spi_chip adt7310_spi_chip_info = { 710static struct bfin5xx_spi_chip adt7310_spi_chip_info = {
751 .enable_dma = 0, 711 .enable_dma = 0,
752 .bits_per_word = 8,
753}; 712};
754#endif 713#endif
755 714
@@ -758,11 +717,6 @@ static unsigned short ad7298_platform_data[] = {
758 GPIO_PF7, /* busy_pin */ 717 GPIO_PF7, /* busy_pin */
759 0, 718 0,
760}; 719};
761
762static struct bfin5xx_spi_chip ad7298_spi_chip_info = {
763 .enable_dma = 0,
764 .bits_per_word = 16,
765};
766#endif 720#endif
767 721
768#if defined(CONFIG_ADT7316_SPI) || defined(CONFIG_ADT7316_SPI_MODULE) 722#if defined(CONFIG_ADT7316_SPI) || defined(CONFIG_ADT7316_SPI_MODULE)
@@ -773,7 +727,6 @@ static unsigned long adt7316_spi_data[2] = {
773 727
774static struct bfin5xx_spi_chip adt7316_spi_chip_info = { 728static struct bfin5xx_spi_chip adt7316_spi_chip_info = {
775 .enable_dma = 0, 729 .enable_dma = 0,
776 .bits_per_word = 8,
777}; 730};
778#endif 731#endif
779 732
@@ -800,18 +753,12 @@ static struct mmc_spi_platform_data bfin_mmc_spi_pdata = {
800 753
801static struct bfin5xx_spi_chip mmc_spi_chip_info = { 754static struct bfin5xx_spi_chip mmc_spi_chip_info = {
802 .enable_dma = 0, 755 .enable_dma = 0,
803 .bits_per_word = 8,
804 .pio_interrupt = 0, 756 .pio_interrupt = 0,
805}; 757};
806#endif 758#endif
807 759
808#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE) 760#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
809#include <linux/spi/ad7877.h> 761#include <linux/spi/ad7877.h>
810static struct bfin5xx_spi_chip spi_ad7877_chip_info = {
811 .enable_dma = 0,
812 .bits_per_word = 16,
813};
814
815static const struct ad7877_platform_data bfin_ad7877_ts_info = { 762static const struct ad7877_platform_data bfin_ad7877_ts_info = {
816 .model = 7877, 763 .model = 7877,
817 .vref_delay_usecs = 50, /* internal, no capacitor */ 764 .vref_delay_usecs = 50, /* internal, no capacitor */
@@ -883,39 +830,13 @@ static const struct adxl34x_platform_data adxl34x_info = {
883}; 830};
884#endif 831#endif
885 832
886#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE)
887static struct bfin5xx_spi_chip spi_ad7879_chip_info = {
888 .enable_dma = 0,
889 .bits_per_word = 16,
890};
891#endif
892
893#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
894static struct bfin5xx_spi_chip spidev_chip_info = {
895 .enable_dma = 0,
896 .bits_per_word = 8,
897};
898#endif
899
900#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
901static struct bfin5xx_spi_chip lq035q1_spi_chip_info = {
902 .enable_dma = 0,
903 .bits_per_word = 8,
904};
905#endif
906
907#if defined(CONFIG_ENC28J60) || defined(CONFIG_ENC28J60_MODULE) 833#if defined(CONFIG_ENC28J60) || defined(CONFIG_ENC28J60_MODULE)
908static struct bfin5xx_spi_chip enc28j60_spi_chip_info = { 834static struct bfin5xx_spi_chip enc28j60_spi_chip_info = {
909 .enable_dma = 1, 835 .enable_dma = 1,
910 .bits_per_word = 8,
911}; 836};
912#endif 837#endif
913 838
914#if defined(CONFIG_ADF702X) || defined(CONFIG_ADF702X_MODULE) 839#if defined(CONFIG_ADF702X) || defined(CONFIG_ADF702X_MODULE)
915static struct bfin5xx_spi_chip adf7021_spi_chip_info = {
916 .bits_per_word = 16,
917};
918
919#include <linux/spi/adf702x.h> 840#include <linux/spi/adf702x.h>
920#define TXREG 0x0160A470 841#define TXREG 0x0160A470
921static const u32 adf7021_regs[] = { 842static const u32 adf7021_regs[] = {
@@ -959,10 +880,6 @@ static inline void adf702x_mac_init(void) {}
959 880
960#if defined(CONFIG_TOUCHSCREEN_ADS7846) || defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE) 881#if defined(CONFIG_TOUCHSCREEN_ADS7846) || defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE)
961#include <linux/spi/ads7846.h> 882#include <linux/spi/ads7846.h>
962static struct bfin5xx_spi_chip ad7873_spi_chip_info = {
963 .bits_per_word = 8,
964};
965
966static int ads7873_get_pendown_state(void) 883static int ads7873_get_pendown_state(void)
967{ 884{
968 return gpio_get_value(GPIO_PF6); 885 return gpio_get_value(GPIO_PF6);
@@ -1009,21 +926,12 @@ static struct flash_platform_data bfin_spi_dataflash_data = {
1009/* DataFlash chip */ 926/* DataFlash chip */
1010static struct bfin5xx_spi_chip data_flash_chip_info = { 927static struct bfin5xx_spi_chip data_flash_chip_info = {
1011 .enable_dma = 0, /* use dma transfer with this chip*/ 928 .enable_dma = 0, /* use dma transfer with this chip*/
1012 .bits_per_word = 8,
1013};
1014#endif
1015
1016#if defined(CONFIG_INPUT_ADXL34X_SPI) || defined(CONFIG_INPUT_ADXL34X_SPI_MODULE)
1017static struct bfin5xx_spi_chip spi_adxl34x_chip_info = {
1018 .enable_dma = 0, /* use dma transfer with this chip*/
1019 .bits_per_word = 8,
1020}; 929};
1021#endif 930#endif
1022 931
1023#if defined(CONFIG_AD7476) || defined(CONFIG_AD7476_MODULE) 932#if defined(CONFIG_AD7476) || defined(CONFIG_AD7476_MODULE)
1024static struct bfin5xx_spi_chip spi_ad7476_chip_info = { 933static struct bfin5xx_spi_chip spi_ad7476_chip_info = {
1025 .enable_dma = 0, /* use dma transfer with this chip*/ 934 .enable_dma = 0, /* use dma transfer with this chip*/
1026 .bits_per_word = 8,
1027}; 935};
1028#endif 936#endif
1029 937
@@ -1053,17 +961,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1053 .mode = SPI_MODE_3, 961 .mode = SPI_MODE_3,
1054 }, 962 },
1055#endif 963#endif
1056#if defined(CONFIG_BFIN_SPI_ADC) \
1057 || defined(CONFIG_BFIN_SPI_ADC_MODULE)
1058 {
1059 .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
1060 .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
1061 .bus_num = 0, /* Framework bus number */
1062 .chip_select = 1, /* Framework chip select. */
1063 .platform_data = NULL, /* No spi_driver specific config */
1064 .controller_data = &spi_adc_chip_info,
1065 },
1066#endif
1067 964
1068#if defined(CONFIG_SND_BF5XX_SOC_AD183X) \ 965#if defined(CONFIG_SND_BF5XX_SOC_AD183X) \
1069 || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE) 966 || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
@@ -1073,7 +970,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1073 .bus_num = 0, 970 .bus_num = 0,
1074 .chip_select = 4, 971 .chip_select = 4,
1075 .platform_data = "ad1836", /* only includes chip name for the moment */ 972 .platform_data = "ad1836", /* only includes chip name for the moment */
1076 .controller_data = &ad1836_spi_chip_info,
1077 .mode = SPI_MODE_3, 973 .mode = SPI_MODE_3,
1078 }, 974 },
1079#endif 975#endif
@@ -1084,7 +980,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1084 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 980 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
1085 .bus_num = 0, 981 .bus_num = 0,
1086 .chip_select = 5, 982 .chip_select = 5,
1087 .controller_data = &ad1938_spi_chip_info,
1088 .mode = SPI_MODE_3, 983 .mode = SPI_MODE_3,
1089 }, 984 },
1090#endif 985#endif
@@ -1095,7 +990,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1095 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 990 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
1096 .bus_num = 0, 991 .bus_num = 0,
1097 .chip_select = 1, 992 .chip_select = 1,
1098 .controller_data = &adav801_spi_chip_info,
1099 .mode = SPI_MODE_3, 993 .mode = SPI_MODE_3,
1100 }, 994 },
1101#endif 995#endif
@@ -1109,7 +1003,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1109 .chip_select = 5, 1003 .chip_select = 5,
1110 .mode = SPI_MODE_3, 1004 .mode = SPI_MODE_3,
1111 .platform_data = &ad7147_spi_platform_data, 1005 .platform_data = &ad7147_spi_platform_data,
1112 .controller_data = &ad7147_spi_chip_info,
1113 }, 1006 },
1114#endif 1007#endif
1115 1008
@@ -1188,7 +1081,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1188 .bus_num = 0, 1081 .bus_num = 0,
1189 .chip_select = 4, /* CS, change it for your board */ 1082 .chip_select = 4, /* CS, change it for your board */
1190 .platform_data = ad7298_platform_data, 1083 .platform_data = ad7298_platform_data,
1191 .controller_data = &ad7298_spi_chip_info,
1192 .mode = SPI_MODE_3, 1084 .mode = SPI_MODE_3,
1193 }, 1085 },
1194#endif 1086#endif
@@ -1225,7 +1117,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1225 .max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */ 1117 .max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */
1226 .bus_num = 0, 1118 .bus_num = 0,
1227 .chip_select = 1, 1119 .chip_select = 1,
1228 .controller_data = &spi_ad7877_chip_info,
1229 }, 1120 },
1230#endif 1121#endif
1231#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE) 1122#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE)
@@ -1236,7 +1127,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1236 .max_speed_hz = 5000000, /* max spi clock (SCK) speed in HZ */ 1127 .max_speed_hz = 5000000, /* max spi clock (SCK) speed in HZ */
1237 .bus_num = 0, 1128 .bus_num = 0,
1238 .chip_select = 1, 1129 .chip_select = 1,
1239 .controller_data = &spi_ad7879_chip_info,
1240 .mode = SPI_CPHA | SPI_CPOL, 1130 .mode = SPI_CPHA | SPI_CPOL,
1241 }, 1131 },
1242#endif 1132#endif
@@ -1246,7 +1136,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1246 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 1136 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
1247 .bus_num = 0, 1137 .bus_num = 0,
1248 .chip_select = 1, 1138 .chip_select = 1,
1249 .controller_data = &spidev_chip_info,
1250 }, 1139 },
1251#endif 1140#endif
1252#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE) 1141#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
@@ -1255,7 +1144,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1255 .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */ 1144 .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
1256 .bus_num = 0, 1145 .bus_num = 0,
1257 .chip_select = 2, 1146 .chip_select = 2,
1258 .controller_data = &lq035q1_spi_chip_info,
1259 .mode = SPI_CPHA | SPI_CPOL, 1147 .mode = SPI_CPHA | SPI_CPOL,
1260 }, 1148 },
1261#endif 1149#endif
@@ -1278,7 +1166,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1278 .max_speed_hz = 5000000, /* max spi clock (SCK) speed in HZ */ 1166 .max_speed_hz = 5000000, /* max spi clock (SCK) speed in HZ */
1279 .bus_num = 0, 1167 .bus_num = 0,
1280 .chip_select = 2, 1168 .chip_select = 2,
1281 .controller_data = &spi_adxl34x_chip_info,
1282 .mode = SPI_MODE_3, 1169 .mode = SPI_MODE_3,
1283 }, 1170 },
1284#endif 1171#endif
@@ -1288,7 +1175,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1288 .max_speed_hz = 16000000, /* max spi clock (SCK) speed in HZ */ 1175 .max_speed_hz = 16000000, /* max spi clock (SCK) speed in HZ */
1289 .bus_num = 0, 1176 .bus_num = 0,
1290 .chip_select = GPIO_PF10 + MAX_CTRL_CS, /* GPIO controlled SSEL */ 1177 .chip_select = GPIO_PF10 + MAX_CTRL_CS, /* GPIO controlled SSEL */
1291 .controller_data = &adf7021_spi_chip_info,
1292 .platform_data = &adf7021_platform_data, 1178 .platform_data = &adf7021_platform_data,
1293 .mode = SPI_MODE_0, 1179 .mode = SPI_MODE_0,
1294 }, 1180 },
@@ -1300,7 +1186,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1300 .bus_num = 0, 1186 .bus_num = 0,
1301 .irq = IRQ_PF6, 1187 .irq = IRQ_PF6,
1302 .chip_select = GPIO_PF10 + MAX_CTRL_CS, /* GPIO controlled SSEL */ 1188 .chip_select = GPIO_PF10 + MAX_CTRL_CS, /* GPIO controlled SSEL */
1303 .controller_data = &ad7873_spi_chip_info,
1304 .platform_data = &ad7873_pdata, 1189 .platform_data = &ad7873_pdata,
1305 .mode = SPI_MODE_0, 1190 .mode = SPI_MODE_0,
1306 }, 1191 },
@@ -2632,9 +2517,25 @@ static struct resource bfin_snd_resources[][4] = {
2632 BFIN_SND_RES(0), 2517 BFIN_SND_RES(0),
2633 BFIN_SND_RES(1), 2518 BFIN_SND_RES(1),
2634}; 2519};
2520#endif
2521
2522#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
2523static struct platform_device bfin_i2s_pcm = {
2524 .name = "bfin-i2s-pcm-audio",
2525 .id = -1,
2526};
2527#endif
2635 2528
2636static struct platform_device bfin_pcm = { 2529#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE)
2637 .name = "bfin-pcm-audio", 2530static struct platform_device bfin_tdm_pcm = {
2531 .name = "bfin-tdm-pcm-audio",
2532 .id = -1,
2533};
2534#endif
2535
2536#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE)
2537static struct platform_device bfin_ac97_pcm = {
2538 .name = "bfin-ac97-pcm-audio",
2638 .id = -1, 2539 .id = -1,
2639}; 2540};
2640#endif 2541#endif
@@ -2869,10 +2770,16 @@ static struct platform_device *stamp_devices[] __initdata = {
2869 &stamp_flash_device, 2770 &stamp_flash_device,
2870#endif 2771#endif
2871 2772
2872#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) || \ 2773#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
2873 defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) || \ 2774 &bfin_i2s_pcm,
2874 defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE) 2775#endif
2875 &bfin_pcm, 2776
2777#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE)
2778 &bfin_tdm_pcm,
2779#endif
2780
2781#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE)
2782 &bfin_ac97_pcm,
2876#endif 2783#endif
2877 2784
2878#if defined(CONFIG_SND_BF5XX_SOC_AD73311) || defined(CONFIG_SND_BF5XX_SOC_AD73311_MODULE) 2785#if defined(CONFIG_SND_BF5XX_SOC_AD73311) || defined(CONFIG_SND_BF5XX_SOC_AD73311_MODULE)
@@ -2916,6 +2823,24 @@ static struct platform_device *stamp_devices[] __initdata = {
2916#endif 2823#endif
2917}; 2824};
2918 2825
2826static int __init net2272_init(void)
2827{
2828#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
2829 int ret;
2830
2831 ret = gpio_request(GPIO_PF6, "net2272");
2832 if (ret)
2833 return ret;
2834
2835 /* Reset the USB chip */
2836 gpio_direction_output(GPIO_PF6, 0);
2837 mdelay(2);
2838 gpio_set_value(GPIO_PF6, 1);
2839#endif
2840
2841 return 0;
2842}
2843
2919static int __init stamp_init(void) 2844static int __init stamp_init(void)
2920{ 2845{
2921 printk(KERN_INFO "%s(): registering device resources\n", __func__); 2846 printk(KERN_INFO "%s(): registering device resources\n", __func__);
@@ -2926,6 +2851,9 @@ static int __init stamp_init(void)
2926 ARRAY_SIZE(bfin_i2c_board_info)); 2851 ARRAY_SIZE(bfin_i2c_board_info));
2927 spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info)); 2852 spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
2928 2853
2854 if (net2272_init())
2855 pr_warning("unable to configure net2272; it probably won't work\n");
2856
2929 return 0; 2857 return 0;
2930} 2858}
2931 2859
diff --git a/arch/blackfin/mach-bf537/boards/tcm_bf537.c b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
index 164a7e02c022..9b7287abdfa1 100644
--- a/arch/blackfin/mach-bf537/boards/tcm_bf537.c
+++ b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
@@ -62,29 +62,12 @@ static struct flash_platform_data bfin_spi_flash_data = {
62/* SPI flash chip (m25p64) */ 62/* SPI flash chip (m25p64) */
63static struct bfin5xx_spi_chip spi_flash_chip_info = { 63static struct bfin5xx_spi_chip spi_flash_chip_info = {
64 .enable_dma = 0, /* use dma transfer with this chip*/ 64 .enable_dma = 0, /* use dma transfer with this chip*/
65 .bits_per_word = 8,
66};
67#endif
68
69#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
70/* SPI ADC chip */
71static struct bfin5xx_spi_chip spi_adc_chip_info = {
72 .enable_dma = 1, /* use dma transfer with this chip*/
73 .bits_per_word = 16,
74};
75#endif
76
77#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
78static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
79 .enable_dma = 0,
80 .bits_per_word = 16,
81}; 65};
82#endif 66#endif
83 67
84#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) 68#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
85static struct bfin5xx_spi_chip mmc_spi_chip_info = { 69static struct bfin5xx_spi_chip mmc_spi_chip_info = {
86 .enable_dma = 0, 70 .enable_dma = 0,
87 .bits_per_word = 8,
88}; 71};
89#endif 72#endif
90 73
@@ -102,24 +85,12 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
102 }, 85 },
103#endif 86#endif
104 87
105#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
106 {
107 .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
108 .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
109 .bus_num = 0, /* Framework bus number */
110 .chip_select = 1, /* Framework chip select. */
111 .platform_data = NULL, /* No spi_driver specific config */
112 .controller_data = &spi_adc_chip_info,
113 },
114#endif
115
116#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE) 88#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
117 { 89 {
118 .modalias = "ad183x", 90 .modalias = "ad183x",
119 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 91 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
120 .bus_num = 0, 92 .bus_num = 0,
121 .chip_select = 4, 93 .chip_select = 4,
122 .controller_data = &ad1836_spi_chip_info,
123 }, 94 },
124#endif 95#endif
125 96
@@ -733,6 +704,24 @@ static struct platform_device *cm_bf537_devices[] __initdata = {
733#endif 704#endif
734}; 705};
735 706
707static int __init net2272_init(void)
708{
709#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
710 int ret;
711
712 ret = gpio_request(GPIO_PG14, "net2272");
713 if (ret)
714 return ret;
715
716 /* Reset USB Chip, PG14 */
717 gpio_direction_output(GPIO_PG14, 0);
718 mdelay(2);
719 gpio_set_value(GPIO_PG14, 1);
720#endif
721
722 return 0;
723}
724
736static int __init tcm_bf537_init(void) 725static int __init tcm_bf537_init(void)
737{ 726{
738 printk(KERN_INFO "%s(): registering device resources\n", __func__); 727 printk(KERN_INFO "%s(): registering device resources\n", __func__);
@@ -744,6 +733,10 @@ static int __init tcm_bf537_init(void)
744#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE) 733#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
745 irq_set_status_flags(PATA_INT, IRQ_NOAUTOEN); 734 irq_set_status_flags(PATA_INT, IRQ_NOAUTOEN);
746#endif 735#endif
736
737 if (net2272_init())
738 pr_warning("unable to configure net2272; it probably won't work\n");
739
747 return 0; 740 return 0;
748} 741}
749 742
diff --git a/arch/blackfin/mach-bf537/include/mach/anomaly.h b/arch/blackfin/mach-bf537/include/mach/anomaly.h
index 7f8e5a9f5db6..543cd3fb305e 100644
--- a/arch/blackfin/mach-bf537/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf537/include/mach/anomaly.h
@@ -11,7 +11,7 @@
11 */ 11 */
12 12
13/* This file should be up to date with: 13/* This file should be up to date with:
14 * - Revision E, 05/25/2010; ADSP-BF534/ADSP-BF536/ADSP-BF537 Blackfin Processor Anomaly List 14 * - Revision F, 05/23/2011; ADSP-BF534/ADSP-BF536/ADSP-BF537 Blackfin Processor Anomaly List
15 */ 15 */
16 16
17#ifndef _MACH_ANOMALY_H_ 17#ifndef _MACH_ANOMALY_H_
@@ -44,18 +44,12 @@
44#define ANOMALY_05000119 (1) 44#define ANOMALY_05000119 (1)
45/* Rx.H Cannot Be Used to Access 16-bit System MMR Registers */ 45/* Rx.H Cannot Be Used to Access 16-bit System MMR Registers */
46#define ANOMALY_05000122 (1) 46#define ANOMALY_05000122 (1)
47/* Killed 32-Bit MMR Write Leads to Next System MMR Access Thinking It Should Be 32-Bit */
48#define ANOMALY_05000157 (__SILICON_REVISION__ < 2)
49/* PPI_DELAY Not Functional in PPI Modes with 0 Frame Syncs */ 47/* PPI_DELAY Not Functional in PPI Modes with 0 Frame Syncs */
50#define ANOMALY_05000180 (1) 48#define ANOMALY_05000180 (1)
51/* Instruction Cache Is Not Functional */
52#define ANOMALY_05000237 (__SILICON_REVISION__ < 2)
53/* If I-Cache Is On, CSYNC/SSYNC/IDLE Around Change of Control Causes Failures */ 49/* If I-Cache Is On, CSYNC/SSYNC/IDLE Around Change of Control Causes Failures */
54#define ANOMALY_05000244 (__SILICON_REVISION__ < 3) 50#define ANOMALY_05000244 (__SILICON_REVISION__ < 3)
55/* False Hardware Error from an Access in the Shadow of a Conditional Branch */ 51/* False Hardware Error from an Access in the Shadow of a Conditional Branch */
56#define ANOMALY_05000245 (1) 52#define ANOMALY_05000245 (1)
57/* Buffered CLKIN Output Is Disabled by Default */
58#define ANOMALY_05000247 (1)
59/* Incorrect Bit Shift of Data Word in Multichannel (TDM) Mode in Certain Conditions */ 53/* Incorrect Bit Shift of Data Word in Multichannel (TDM) Mode in Certain Conditions */
60#define ANOMALY_05000250 (__SILICON_REVISION__ < 3) 54#define ANOMALY_05000250 (__SILICON_REVISION__ < 3)
61/* EMAC TX DMA Error After an Early Frame Abort */ 55/* EMAC TX DMA Error After an Early Frame Abort */
@@ -98,7 +92,7 @@
98#define ANOMALY_05000278 (((ANOMALY_BF536 || ANOMALY_BF537) && __SILICON_REVISION__ < 3) || (ANOMALY_BF534 && __SILICON_REVISION__ < 2)) 92#define ANOMALY_05000278 (((ANOMALY_BF536 || ANOMALY_BF537) && __SILICON_REVISION__ < 3) || (ANOMALY_BF534 && __SILICON_REVISION__ < 2))
99/* SPI Master Boot Mode Does Not Work Well with Atmel Data Flash Devices */ 93/* SPI Master Boot Mode Does Not Work Well with Atmel Data Flash Devices */
100#define ANOMALY_05000280 (1) 94#define ANOMALY_05000280 (1)
101/* False Hardware Error Exception when ISR Context Is Not Restored */ 95/* False Hardware Error when ISR Context Is Not Restored */
102#define ANOMALY_05000281 (__SILICON_REVISION__ < 3) 96#define ANOMALY_05000281 (__SILICON_REVISION__ < 3)
103/* Memory DMA Corruption with 32-Bit Data and Traffic Control */ 97/* Memory DMA Corruption with 32-Bit Data and Traffic Control */
104#define ANOMALY_05000282 (__SILICON_REVISION__ < 3) 98#define ANOMALY_05000282 (__SILICON_REVISION__ < 3)
@@ -162,9 +156,9 @@
162#define ANOMALY_05000461 (1) 156#define ANOMALY_05000461 (1)
163/* Synchronization Problem at Startup May Cause SPORT Transmit Channels to Misalign */ 157/* Synchronization Problem at Startup May Cause SPORT Transmit Channels to Misalign */
164#define ANOMALY_05000462 (1) 158#define ANOMALY_05000462 (1)
165/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */ 159/* Interrupted SPORT Receive Data Register Read Results In Underflow when SLEN > 15 */
166#define ANOMALY_05000473 (1) 160#define ANOMALY_05000473 (1)
167/* Possible Lockup Condition whem Modifying PLL from External Memory */ 161/* Possible Lockup Condition when Modifying PLL from External Memory */
168#define ANOMALY_05000475 (1) 162#define ANOMALY_05000475 (1)
169/* TESTSET Instruction Cannot Be Interrupted */ 163/* TESTSET Instruction Cannot Be Interrupted */
170#define ANOMALY_05000477 (1) 164#define ANOMALY_05000477 (1)
@@ -172,8 +166,26 @@
172#define ANOMALY_05000480 (__SILICON_REVISION__ < 3) 166#define ANOMALY_05000480 (__SILICON_REVISION__ < 3)
173/* Reads of ITEST_COMMAND and ITEST_DATA Registers Cause Cache Corruption */ 167/* Reads of ITEST_COMMAND and ITEST_DATA Registers Cause Cache Corruption */
174#define ANOMALY_05000481 (1) 168#define ANOMALY_05000481 (1)
175/* IFLUSH sucks at life */ 169/* PLL May Latch Incorrect Values Coming Out of Reset */
170#define ANOMALY_05000489 (1)
171/* Instruction Memory Stalls Can Cause IFLUSH to Fail */
176#define ANOMALY_05000491 (1) 172#define ANOMALY_05000491 (1)
173/* EXCPT Instruction May Be Lost If NMI Happens Simultaneously */
174#define ANOMALY_05000494 (1)
175/* RXS Bit in SPI_STAT May Become Stuck In RX DMA Modes */
176#define ANOMALY_05000501 (1)
177
178/*
179 * These anomalies have been "phased" out of analog.com anomaly sheets and are
180 * here to show running on older silicon just isn't feasible.
181 */
182
183/* Killed 32-Bit MMR Write Leads to Next System MMR Access Thinking It Should Be 32-Bit */
184#define ANOMALY_05000157 (__SILICON_REVISION__ < 2)
185/* Instruction Cache Is Not Functional */
186#define ANOMALY_05000237 (__SILICON_REVISION__ < 2)
187/* Buffered CLKIN Output Is Disabled by Default */
188#define ANOMALY_05000247 (__SILICON_REVISION__ < 2)
177 189
178/* Anomalies that don't exist on this proc */ 190/* Anomalies that don't exist on this proc */
179#define ANOMALY_05000099 (0) 191#define ANOMALY_05000099 (0)
diff --git a/arch/blackfin/mach-bf538/boards/ezkit.c b/arch/blackfin/mach-bf538/boards/ezkit.c
index e61424ef35eb..629f3c333415 100644
--- a/arch/blackfin/mach-bf538/boards/ezkit.c
+++ b/arch/blackfin/mach-bf538/boards/ezkit.c
@@ -502,7 +502,6 @@ static struct flash_platform_data bfin_spi_flash_data = {
502 502
503static struct bfin5xx_spi_chip spi_flash_chip_info = { 503static struct bfin5xx_spi_chip spi_flash_chip_info = {
504 .enable_dma = 0, /* use dma transfer with this chip*/ 504 .enable_dma = 0, /* use dma transfer with this chip*/
505 .bits_per_word = 8,
506}; 505};
507#endif 506#endif
508 507
@@ -523,13 +522,6 @@ static const struct ad7879_platform_data bfin_ad7879_ts_info = {
523}; 522};
524#endif 523#endif
525 524
526#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE)
527static struct bfin5xx_spi_chip spi_ad7879_chip_info = {
528 .enable_dma = 0,
529 .bits_per_word = 16,
530};
531#endif
532
533#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE) 525#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
534#include <asm/bfin-lq035q1.h> 526#include <asm/bfin-lq035q1.h>
535 527
@@ -559,20 +551,6 @@ static struct platform_device bfin_lq035q1_device = {
559}; 551};
560#endif 552#endif
561 553
562#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
563static struct bfin5xx_spi_chip spidev_chip_info = {
564 .enable_dma = 0,
565 .bits_per_word = 8,
566};
567#endif
568
569#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
570static struct bfin5xx_spi_chip lq035q1_spi_chip_info = {
571 .enable_dma = 0,
572 .bits_per_word = 8,
573};
574#endif
575
576static struct spi_board_info bf538_spi_board_info[] __initdata = { 554static struct spi_board_info bf538_spi_board_info[] __initdata = {
577#if defined(CONFIG_MTD_M25P80) \ 555#if defined(CONFIG_MTD_M25P80) \
578 || defined(CONFIG_MTD_M25P80_MODULE) 556 || defined(CONFIG_MTD_M25P80_MODULE)
@@ -595,7 +573,6 @@ static struct spi_board_info bf538_spi_board_info[] __initdata = {
595 .max_speed_hz = 5000000, /* max spi clock (SCK) speed in HZ */ 573 .max_speed_hz = 5000000, /* max spi clock (SCK) speed in HZ */
596 .bus_num = 0, 574 .bus_num = 0,
597 .chip_select = 1, 575 .chip_select = 1,
598 .controller_data = &spi_ad7879_chip_info,
599 .mode = SPI_CPHA | SPI_CPOL, 576 .mode = SPI_CPHA | SPI_CPOL,
600 }, 577 },
601#endif 578#endif
@@ -605,7 +582,6 @@ static struct spi_board_info bf538_spi_board_info[] __initdata = {
605 .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */ 582 .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
606 .bus_num = 0, 583 .bus_num = 0,
607 .chip_select = 2, 584 .chip_select = 2,
608 .controller_data = &lq035q1_spi_chip_info,
609 .mode = SPI_CPHA | SPI_CPOL, 585 .mode = SPI_CPHA | SPI_CPOL,
610 }, 586 },
611#endif 587#endif
@@ -615,7 +591,6 @@ static struct spi_board_info bf538_spi_board_info[] __initdata = {
615 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 591 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
616 .bus_num = 0, 592 .bus_num = 0,
617 .chip_select = 1, 593 .chip_select = 1,
618 .controller_data = &spidev_chip_info,
619 }, 594 },
620#endif 595#endif
621}; 596};
diff --git a/arch/blackfin/mach-bf538/ext-gpio.c b/arch/blackfin/mach-bf538/ext-gpio.c
index 180b1252679f..471a9b184d5b 100644
--- a/arch/blackfin/mach-bf538/ext-gpio.c
+++ b/arch/blackfin/mach-bf538/ext-gpio.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * GPIOLIB interface for BF538/9 PORT C, D, and E GPIOs 2 * GPIOLIB interface for BF538/9 PORT C, D, and E GPIOs
3 * 3 *
4 * Copyright 2009 Analog Devices Inc. 4 * Copyright 2009-2011 Analog Devices Inc.
5 * 5 *
6 * Licensed under the GPL-2 or later. 6 * Licensed under the GPL-2 or later.
7 */ 7 */
@@ -121,3 +121,38 @@ static int __init bf538_extgpio_setup(void)
121 gpiochip_add(&bf538_porte_chip); 121 gpiochip_add(&bf538_porte_chip);
122} 122}
123arch_initcall(bf538_extgpio_setup); 123arch_initcall(bf538_extgpio_setup);
124
125#ifdef CONFIG_PM
126static struct {
127 u16 data, dir, inen;
128} gpio_bank_saved[3];
129
130static void __iomem * const port_bases[3] = {
131 (void *)PORTCIO,
132 (void *)PORTDIO,
133 (void *)PORTEIO,
134};
135
136void bfin_special_gpio_pm_hibernate_suspend(void)
137{
138 int i;
139
140 for (i = 0; i < ARRAY_SIZE(port_bases); ++i) {
141 gpio_bank_saved[i].data = read_PORTIO(port_bases[i]);
142 gpio_bank_saved[i].inen = read_PORTIO_INEN(port_bases[i]);
143 gpio_bank_saved[i].dir = read_PORTIO_DIR(port_bases[i]);
144 }
145}
146
147void bfin_special_gpio_pm_hibernate_restore(void)
148{
149 int i;
150
151 for (i = 0; i < ARRAY_SIZE(port_bases); ++i) {
152 write_PORTIO_INEN(port_bases[i], gpio_bank_saved[i].inen);
153 write_PORTIO_SET(port_bases[i],
154 gpio_bank_saved[i].data & gpio_bank_saved[i].dir);
155 write_PORTIO_DIR(port_bases[i], gpio_bank_saved[i].dir);
156 }
157}
158#endif
diff --git a/arch/blackfin/mach-bf538/include/mach/anomaly.h b/arch/blackfin/mach-bf538/include/mach/anomaly.h
index 55e7d0712a94..b6ca99788710 100644
--- a/arch/blackfin/mach-bf538/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf538/include/mach/anomaly.h
@@ -11,8 +11,8 @@
11 */ 11 */
12 12
13/* This file should be up to date with: 13/* This file should be up to date with:
14 * - Revision I, 05/25/2010; ADSP-BF538/BF538F Blackfin Processor Anomaly List 14 * - Revision J, 05/23/2011; ADSP-BF538/BF538F Blackfin Processor Anomaly List
15 * - Revision N, 05/25/2010; ADSP-BF539/BF539F Blackfin Processor Anomaly List 15 * - Revision O, 05/23/2011; ADSP-BF539/BF539F Blackfin Processor Anomaly List
16 */ 16 */
17 17
18#ifndef _MACH_ANOMALY_H_ 18#ifndef _MACH_ANOMALY_H_
@@ -56,25 +56,21 @@
56#define ANOMALY_05000229 (1) 56#define ANOMALY_05000229 (1)
57/* PPI_FS3 Is Not Driven in 2 or 3 Internal Frame Sync Transmit Modes */ 57/* PPI_FS3 Is Not Driven in 2 or 3 Internal Frame Sync Transmit Modes */
58#define ANOMALY_05000233 (1) 58#define ANOMALY_05000233 (1)
59/* If I-Cache Is On, CSYNC/SSYNC/IDLE Around Change of Control Causes Failures */
60#define ANOMALY_05000244 (__SILICON_REVISION__ < 3)
61/* False Hardware Error from an Access in the Shadow of a Conditional Branch */ 59/* False Hardware Error from an Access in the Shadow of a Conditional Branch */
62#define ANOMALY_05000245 (1) 60#define ANOMALY_05000245 (1)
63/* Maximum External Clock Speed for Timers */ 61/* Maximum External Clock Speed for Timers */
64#define ANOMALY_05000253 (1) 62#define ANOMALY_05000253 (1)
65/* DCPLB_FAULT_ADDR MMR Register May Be Corrupted */
66#define ANOMALY_05000261 (__SILICON_REVISION__ < 3)
67/* High I/O Activity Causes Output Voltage of Internal Voltage Regulator (Vddint) to Decrease */ 63/* High I/O Activity Causes Output Voltage of Internal Voltage Regulator (Vddint) to Decrease */
68#define ANOMALY_05000270 (__SILICON_REVISION__ < 4) 64#define ANOMALY_05000270 (__SILICON_REVISION__ < 4)
69/* Certain Data Cache Writethrough Modes Fail for Vddint <= 0.9V */ 65/* Certain Data Cache Writethrough Modes Fail for Vddint <= 0.9V */
70#define ANOMALY_05000272 (1) 66#define ANOMALY_05000272 (ANOMALY_BF538)
71/* Writes to Synchronous SDRAM Memory May Be Lost */ 67/* Writes to Synchronous SDRAM Memory May Be Lost */
72#define ANOMALY_05000273 (__SILICON_REVISION__ < 4) 68#define ANOMALY_05000273 (__SILICON_REVISION__ < 4)
73/* Writes to an I/O Data Register One SCLK Cycle after an Edge Is Detected May Clear Interrupt */ 69/* Writes to an I/O Data Register One SCLK Cycle after an Edge Is Detected May Clear Interrupt */
74#define ANOMALY_05000277 (__SILICON_REVISION__ < 4) 70#define ANOMALY_05000277 (__SILICON_REVISION__ < 4)
75/* Disabling Peripherals with DMA Running May Cause DMA System Instability */ 71/* Disabling Peripherals with DMA Running May Cause DMA System Instability */
76#define ANOMALY_05000278 (__SILICON_REVISION__ < 4) 72#define ANOMALY_05000278 (__SILICON_REVISION__ < 4)
77/* False Hardware Error Exception when ISR Context Is Not Restored */ 73/* False Hardware Error when ISR Context Is Not Restored */
78#define ANOMALY_05000281 (__SILICON_REVISION__ < 4) 74#define ANOMALY_05000281 (__SILICON_REVISION__ < 4)
79/* Memory DMA Corruption with 32-Bit Data and Traffic Control */ 75/* Memory DMA Corruption with 32-Bit Data and Traffic Control */
80#define ANOMALY_05000282 (__SILICON_REVISION__ < 4) 76#define ANOMALY_05000282 (__SILICON_REVISION__ < 4)
@@ -102,8 +98,10 @@
102#define ANOMALY_05000313 (__SILICON_REVISION__ < 4) 98#define ANOMALY_05000313 (__SILICON_REVISION__ < 4)
103/* Killed System MMR Write Completes Erroneously on Next System MMR Access */ 99/* Killed System MMR Write Completes Erroneously on Next System MMR Access */
104#define ANOMALY_05000315 (__SILICON_REVISION__ < 4) 100#define ANOMALY_05000315 (__SILICON_REVISION__ < 4)
101/* PFx Glitch on Write to PORTFIO or PORTFIO_TOGGLE */
102#define ANOMALY_05000317 (__SILICON_REVISION__ < 4) /* XXX: Same as 05000318 */
105/* PFx Glitch on Write to FIO_FLAG_D or FIO_FLAG_T */ 103/* PFx Glitch on Write to FIO_FLAG_D or FIO_FLAG_T */
106#define ANOMALY_05000318 (ANOMALY_BF539 && __SILICON_REVISION__ < 4) 104#define ANOMALY_05000318 (__SILICON_REVISION__ < 4) /* XXX: Same as 05000317 */
107/* Regulator Programming Blocked when Hibernate Wakeup Source Remains Active */ 105/* Regulator Programming Blocked when Hibernate Wakeup Source Remains Active */
108#define ANOMALY_05000355 (__SILICON_REVISION__ < 5) 106#define ANOMALY_05000355 (__SILICON_REVISION__ < 5)
109/* Serial Port (SPORT) Multichannel Transmit Failure when Channel 0 Is Disabled */ 107/* Serial Port (SPORT) Multichannel Transmit Failure when Channel 0 Is Disabled */
@@ -134,16 +132,32 @@
134#define ANOMALY_05000461 (1) 132#define ANOMALY_05000461 (1)
135/* Synchronization Problem at Startup May Cause SPORT Transmit Channels to Misalign */ 133/* Synchronization Problem at Startup May Cause SPORT Transmit Channels to Misalign */
136#define ANOMALY_05000462 (1) 134#define ANOMALY_05000462 (1)
137/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */ 135/* Interrupted SPORT Receive Data Register Read Results In Underflow when SLEN > 15 */
138#define ANOMALY_05000473 (1) 136#define ANOMALY_05000473 (1)
139/* Possible Lockup Condition whem Modifying PLL from External Memory */ 137/* Possible Lockup Condition when Modifying PLL from External Memory */
140#define ANOMALY_05000475 (1) 138#define ANOMALY_05000475 (1)
141/* TESTSET Instruction Cannot Be Interrupted */ 139/* TESTSET Instruction Cannot Be Interrupted */
142#define ANOMALY_05000477 (1) 140#define ANOMALY_05000477 (1)
143/* Reads of ITEST_COMMAND and ITEST_DATA Registers Cause Cache Corruption */ 141/* Reads of ITEST_COMMAND and ITEST_DATA Registers Cause Cache Corruption */
144#define ANOMALY_05000481 (1) 142#define ANOMALY_05000481 (1)
145/* IFLUSH sucks at life */ 143/* PLL May Latch Incorrect Values Coming Out of Reset */
144#define ANOMALY_05000489 (1)
145/* Instruction Memory Stalls Can Cause IFLUSH to Fail */
146#define ANOMALY_05000491 (1) 146#define ANOMALY_05000491 (1)
147/* EXCPT Instruction May Be Lost If NMI Happens Simultaneously */
148#define ANOMALY_05000494 (1)
149/* RXS Bit in SPI_STAT May Become Stuck In RX DMA Modes */
150#define ANOMALY_05000501 (1)
151
152/*
153 * These anomalies have been "phased" out of analog.com anomaly sheets and are
154 * here to show running on older silicon just isn't feasible.
155 */
156
157/* If I-Cache Is On, CSYNC/SSYNC/IDLE Around Change of Control Causes Failures */
158#define ANOMALY_05000244 (__SILICON_REVISION__ < 3)
159/* DCPLB_FAULT_ADDR MMR Register May Be Corrupted */
160#define ANOMALY_05000261 (__SILICON_REVISION__ < 3)
147 161
148/* Anomalies that don't exist on this proc */ 162/* Anomalies that don't exist on this proc */
149#define ANOMALY_05000099 (0) 163#define ANOMALY_05000099 (0)
diff --git a/arch/blackfin/mach-bf538/include/mach/gpio.h b/arch/blackfin/mach-bf538/include/mach/gpio.h
index 8a5beeece996..3561c7d8935b 100644
--- a/arch/blackfin/mach-bf538/include/mach/gpio.h
+++ b/arch/blackfin/mach-bf538/include/mach/gpio.h
@@ -8,7 +8,10 @@
8#define _MACH_GPIO_H_ 8#define _MACH_GPIO_H_
9 9
10#define MAX_BLACKFIN_GPIOS 16 10#define MAX_BLACKFIN_GPIOS 16
11#ifdef CONFIG_GPIOLIB
12/* We only use the special logic with GPIOLIB devices */
11#define BFIN_SPECIAL_GPIO_BANKS 3 13#define BFIN_SPECIAL_GPIO_BANKS 3
14#endif
12 15
13#define GPIO_PF0 0 /* PF */ 16#define GPIO_PF0 0 /* PF */
14#define GPIO_PF1 1 17#define GPIO_PF1 1
diff --git a/arch/blackfin/mach-bf548/boards/cm_bf548.c b/arch/blackfin/mach-bf548/boards/cm_bf548.c
index d11502ac5623..212b9e0a08c8 100644
--- a/arch/blackfin/mach-bf548/boards/cm_bf548.c
+++ b/arch/blackfin/mach-bf548/boards/cm_bf548.c
@@ -861,16 +861,10 @@ static struct flash_platform_data bfin_spi_flash_data = {
861 861
862static struct bfin5xx_spi_chip spi_flash_chip_info = { 862static struct bfin5xx_spi_chip spi_flash_chip_info = {
863 .enable_dma = 0, /* use dma transfer with this chip*/ 863 .enable_dma = 0, /* use dma transfer with this chip*/
864 .bits_per_word = 8,
865}; 864};
866#endif 865#endif
867 866
868#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE) 867#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
869static struct bfin5xx_spi_chip spi_ad7877_chip_info = {
870 .enable_dma = 0,
871 .bits_per_word = 16,
872};
873
874static const struct ad7877_platform_data bfin_ad7877_ts_info = { 868static const struct ad7877_platform_data bfin_ad7877_ts_info = {
875 .model = 7877, 869 .model = 7877,
876 .vref_delay_usecs = 50, /* internal, no capacitor */ 870 .vref_delay_usecs = 50, /* internal, no capacitor */
@@ -886,13 +880,6 @@ static const struct ad7877_platform_data bfin_ad7877_ts_info = {
886}; 880};
887#endif 881#endif
888 882
889#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
890static struct bfin5xx_spi_chip spidev_chip_info = {
891 .enable_dma = 0,
892 .bits_per_word = 8,
893};
894#endif
895
896static struct spi_board_info bf54x_spi_board_info[] __initdata = { 883static struct spi_board_info bf54x_spi_board_info[] __initdata = {
897#if defined(CONFIG_MTD_M25P80) \ 884#if defined(CONFIG_MTD_M25P80) \
898 || defined(CONFIG_MTD_M25P80_MODULE) 885 || defined(CONFIG_MTD_M25P80_MODULE)
@@ -915,7 +902,6 @@ static struct spi_board_info bf54x_spi_board_info[] __initdata = {
915 .max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */ 902 .max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */
916 .bus_num = 0, 903 .bus_num = 0,
917 .chip_select = 2, 904 .chip_select = 2,
918 .controller_data = &spi_ad7877_chip_info,
919}, 905},
920#endif 906#endif
921#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE) 907#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
@@ -924,7 +910,6 @@ static struct spi_board_info bf54x_spi_board_info[] __initdata = {
924 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 910 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
925 .bus_num = 0, 911 .bus_num = 0,
926 .chip_select = 1, 912 .chip_select = 1,
927 .controller_data = &spidev_chip_info,
928 }, 913 },
929#endif 914#endif
930}; 915};
diff --git a/arch/blackfin/mach-bf548/boards/ezkit.c b/arch/blackfin/mach-bf548/boards/ezkit.c
index 311bf9970fe7..cd9cbb68de69 100644
--- a/arch/blackfin/mach-bf548/boards/ezkit.c
+++ b/arch/blackfin/mach-bf548/boards/ezkit.c
@@ -1018,24 +1018,10 @@ static struct flash_platform_data bfin_spi_flash_data = {
1018 1018
1019static struct bfin5xx_spi_chip spi_flash_chip_info = { 1019static struct bfin5xx_spi_chip spi_flash_chip_info = {
1020 .enable_dma = 0, /* use dma transfer with this chip*/ 1020 .enable_dma = 0, /* use dma transfer with this chip*/
1021 .bits_per_word = 8,
1022};
1023#endif
1024
1025#if defined(CONFIG_SND_BF5XX_SOC_AD183X) \
1026 || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
1027static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
1028 .enable_dma = 0,
1029 .bits_per_word = 16,
1030}; 1021};
1031#endif 1022#endif
1032 1023
1033#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE) 1024#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
1034static struct bfin5xx_spi_chip spi_ad7877_chip_info = {
1035 .enable_dma = 0,
1036 .bits_per_word = 16,
1037};
1038
1039static const struct ad7877_platform_data bfin_ad7877_ts_info = { 1025static const struct ad7877_platform_data bfin_ad7877_ts_info = {
1040 .model = 7877, 1026 .model = 7877,
1041 .vref_delay_usecs = 50, /* internal, no capacitor */ 1027 .vref_delay_usecs = 50, /* internal, no capacitor */
@@ -1051,20 +1037,6 @@ static const struct ad7877_platform_data bfin_ad7877_ts_info = {
1051}; 1037};
1052#endif 1038#endif
1053 1039
1054#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
1055static struct bfin5xx_spi_chip spidev_chip_info = {
1056 .enable_dma = 0,
1057 .bits_per_word = 8,
1058};
1059#endif
1060
1061#if defined(CONFIG_INPUT_ADXL34X_SPI) || defined(CONFIG_INPUT_ADXL34X_SPI_MODULE)
1062static struct bfin5xx_spi_chip spi_adxl34x_chip_info = {
1063 .enable_dma = 0, /* use dma transfer with this chip*/
1064 .bits_per_word = 8,
1065};
1066#endif
1067
1068static struct spi_board_info bfin_spi_board_info[] __initdata = { 1040static struct spi_board_info bfin_spi_board_info[] __initdata = {
1069#if defined(CONFIG_MTD_M25P80) \ 1041#if defined(CONFIG_MTD_M25P80) \
1070 || defined(CONFIG_MTD_M25P80_MODULE) 1042 || defined(CONFIG_MTD_M25P80_MODULE)
@@ -1086,7 +1058,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1086 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 1058 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
1087 .bus_num = 1, 1059 .bus_num = 1,
1088 .chip_select = 4, 1060 .chip_select = 4,
1089 .controller_data = &ad1836_spi_chip_info,
1090 }, 1061 },
1091#endif 1062#endif
1092#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE) 1063#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
@@ -1097,7 +1068,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1097 .max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */ 1068 .max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */
1098 .bus_num = 0, 1069 .bus_num = 0,
1099 .chip_select = 2, 1070 .chip_select = 2,
1100 .controller_data = &spi_ad7877_chip_info,
1101 }, 1071 },
1102#endif 1072#endif
1103#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE) 1073#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
@@ -1106,7 +1076,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1106 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 1076 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
1107 .bus_num = 0, 1077 .bus_num = 0,
1108 .chip_select = 1, 1078 .chip_select = 1,
1109 .controller_data = &spidev_chip_info,
1110 }, 1079 },
1111#endif 1080#endif
1112#if defined(CONFIG_INPUT_ADXL34X_SPI) || defined(CONFIG_INPUT_ADXL34X_SPI_MODULE) 1081#if defined(CONFIG_INPUT_ADXL34X_SPI) || defined(CONFIG_INPUT_ADXL34X_SPI_MODULE)
@@ -1117,7 +1086,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1117 .max_speed_hz = 5000000, /* max spi clock (SCK) speed in HZ */ 1086 .max_speed_hz = 5000000, /* max spi clock (SCK) speed in HZ */
1118 .bus_num = 1, 1087 .bus_num = 1,
1119 .chip_select = 2, 1088 .chip_select = 2,
1120 .controller_data = &spi_adxl34x_chip_info,
1121 .mode = SPI_MODE_3, 1089 .mode = SPI_MODE_3,
1122 }, 1090 },
1123#endif 1091#endif
diff --git a/arch/blackfin/mach-bf548/include/mach/anomaly.h b/arch/blackfin/mach-bf548/include/mach/anomaly.h
index 9e70785bdde3..ac96ee83b00e 100644
--- a/arch/blackfin/mach-bf548/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf548/include/mach/anomaly.h
@@ -11,7 +11,7 @@
11 */ 11 */
12 12
13/* This file should be up to date with: 13/* This file should be up to date with:
14 * - Revision J, 06/03/2010; ADSP-BF542/BF544/BF547/BF548/BF549 Blackfin Processor Anomaly List 14 * - Revision K, 05/23/2011; ADSP-BF542/BF544/BF547/BF548/BF549 Blackfin Processor Anomaly List
15 */ 15 */
16 16
17#ifndef _MACH_ANOMALY_H_ 17#ifndef _MACH_ANOMALY_H_
@@ -29,117 +29,37 @@
29/* Rx.H Cannot Be Used to Access 16-bit System MMR Registers */ 29/* Rx.H Cannot Be Used to Access 16-bit System MMR Registers */
30#define ANOMALY_05000122 (1) 30#define ANOMALY_05000122 (1)
31/* Data Corruption/Core Hang with L2/L3 Configured in Writeback Cache Mode */ 31/* Data Corruption/Core Hang with L2/L3 Configured in Writeback Cache Mode */
32#define ANOMALY_05000220 (1) 32#define ANOMALY_05000220 (__SILICON_REVISION__ < 4)
33/* False Hardware Error from an Access in the Shadow of a Conditional Branch */ 33/* False Hardware Error from an Access in the Shadow of a Conditional Branch */
34#define ANOMALY_05000245 (1) 34#define ANOMALY_05000245 (1)
35/* Sensitivity To Noise with Slow Input Edge Rates on External SPORT TX and RX Clocks */ 35/* Sensitivity To Noise with Slow Input Edge Rates on External SPORT TX and RX Clocks */
36#define ANOMALY_05000265 (1) 36#define ANOMALY_05000265 (1)
37/* Certain Data Cache Writethrough Modes Fail for Vddint <= 0.9V */ 37/* Certain Data Cache Writethrough Modes Fail for Vddint <= 0.9V */
38#define ANOMALY_05000272 (1) 38#define ANOMALY_05000272 (1)
39/* False Hardware Error Exception when ISR Context Is Not Restored */
40#define ANOMALY_05000281 (__SILICON_REVISION__ < 1)
41/* SSYNCs After Writes To CAN/DMA MMR Registers Are Not Always Handled Correctly */
42#define ANOMALY_05000304 (__SILICON_REVISION__ < 1)
43/* False Hardware Errors Caused by Fetches at the Boundary of Reserved Memory */ 39/* False Hardware Errors Caused by Fetches at the Boundary of Reserved Memory */
44#define ANOMALY_05000310 (1) 40#define ANOMALY_05000310 (1)
45/* Errors when SSYNC, CSYNC, or Loads to LT, LB and LC Registers Are Interrupted */
46#define ANOMALY_05000312 (__SILICON_REVISION__ < 1)
47/* TWI Slave Boot Mode Is Not Functional */
48#define ANOMALY_05000324 (__SILICON_REVISION__ < 1)
49/* FIFO Boot Mode Not Functional */ 41/* FIFO Boot Mode Not Functional */
50#define ANOMALY_05000325 (__SILICON_REVISION__ < 2) 42#define ANOMALY_05000325 (__SILICON_REVISION__ < 2)
51/* Data Lost When Core and DMA Accesses Are Made to the USB FIFO Simultaneously */
52#define ANOMALY_05000327 (__SILICON_REVISION__ < 1)
53/* Incorrect Access of OTP_STATUS During otp_write() Function */
54#define ANOMALY_05000328 (__SILICON_REVISION__ < 1)
55/* Synchronous Burst Flash Boot Mode Is Not Functional */
56#define ANOMALY_05000329 (__SILICON_REVISION__ < 1)
57/* Host DMA Boot Modes Are Not Functional */
58#define ANOMALY_05000330 (__SILICON_REVISION__ < 1)
59/* Inadequate Timing Margins on DDR DQS to DQ and DQM Skew */
60#define ANOMALY_05000334 (__SILICON_REVISION__ < 1)
61/* Inadequate Rotary Debounce Logic Duration */
62#define ANOMALY_05000335 (__SILICON_REVISION__ < 1)
63/* Phantom Interrupt Occurs After First Configuration of Host DMA Port */
64#define ANOMALY_05000336 (__SILICON_REVISION__ < 1)
65/* Disallowed Configuration Prevents Subsequent Allowed Configuration on Host DMA Port */
66#define ANOMALY_05000337 (__SILICON_REVISION__ < 1)
67/* Slave-Mode SPI0 MISO Failure With CPHA = 0 */
68#define ANOMALY_05000338 (__SILICON_REVISION__ < 1)
69/* If Memory Reads Are Enabled on SDH or HOSTDP, Other DMAC1 Peripherals Cannot Read */
70#define ANOMALY_05000340 (__SILICON_REVISION__ < 1)
71/* Boot Host Wait (HWAIT) and Boot Host Wait Alternate (HWAITA) Signals Are Swapped */
72#define ANOMALY_05000344 (__SILICON_REVISION__ < 1)
73/* USB Calibration Value Is Not Initialized */
74#define ANOMALY_05000346 (__SILICON_REVISION__ < 1)
75/* USB Calibration Value to use */
76#define ANOMALY_05000346_value 0x5411
77/* Preboot Routine Incorrectly Alters Reset Value of USB Register */
78#define ANOMALY_05000347 (__SILICON_REVISION__ < 1)
79/* Data Lost when Core Reads SDH Data FIFO */
80#define ANOMALY_05000349 (__SILICON_REVISION__ < 1)
81/* PLL Status Register Is Inaccurate */
82#define ANOMALY_05000351 (__SILICON_REVISION__ < 1)
83/* bfrom_SysControl() Firmware Function Performs Improper System Reset */ 43/* bfrom_SysControl() Firmware Function Performs Improper System Reset */
84/* 44/*
85 * Note: anomaly sheet says this is fixed with bf54x-0.2+, but testing 45 * Note: anomaly sheet says this is fixed with bf54x-0.2+, but testing
86 * shows that the fix itself does not cover all cases. 46 * shows that the fix itself does not cover all cases.
87 */ 47 */
88#define ANOMALY_05000353 (1) 48#define ANOMALY_05000353 (1)
89/* Regulator Programming Blocked when Hibernate Wakeup Source Remains Active */
90#define ANOMALY_05000355 (__SILICON_REVISION__ < 1)
91/* System Stalled During A Core Access To AMC While A Core Access To NFC FIFO Is Required */
92#define ANOMALY_05000356 (__SILICON_REVISION__ < 1)
93/* Serial Port (SPORT) Multichannel Transmit Failure when Channel 0 Is Disabled */ 49/* Serial Port (SPORT) Multichannel Transmit Failure when Channel 0 Is Disabled */
94#define ANOMALY_05000357 (1) 50#define ANOMALY_05000357 (1)
95/* External Memory Read Access Hangs Core With PLL Bypass */ 51/* External Memory Read Access Hangs Core With PLL Bypass */
96#define ANOMALY_05000360 (1) 52#define ANOMALY_05000360 (1)
97/* DMAs that Go Urgent during Tight Core Writes to External Memory Are Blocked */ 53/* DMAs that Go Urgent during Tight Core Writes to External Memory Are Blocked */
98#define ANOMALY_05000365 (1) 54#define ANOMALY_05000365 (1)
99/* WURESET Bit In SYSCR Register Does Not Properly Indicate Hibernate Wake-Up */
100#define ANOMALY_05000367 (__SILICON_REVISION__ < 1)
101/* Addressing Conflict between Boot ROM and Asynchronous Memory */ 55/* Addressing Conflict between Boot ROM and Asynchronous Memory */
102#define ANOMALY_05000369 (1) 56#define ANOMALY_05000369 (1)
103/* Default PLL MSEL and SSEL Settings Can Cause 400MHz Product To Violate Specifications */
104#define ANOMALY_05000370 (__SILICON_REVISION__ < 1)
105/* Possible RETS Register Corruption when Subroutine Is under 5 Cycles in Duration */ 57/* Possible RETS Register Corruption when Subroutine Is under 5 Cycles in Duration */
106#define ANOMALY_05000371 (__SILICON_REVISION__ < 2) 58#define ANOMALY_05000371 (__SILICON_REVISION__ < 2)
107/* USB DP/DM Data Pins May Lose State When Entering Hibernate */
108#define ANOMALY_05000372 (__SILICON_REVISION__ < 1)
109/* Security/Authentication Speedpath Causes Authentication To Fail To Initiate */ 59/* Security/Authentication Speedpath Causes Authentication To Fail To Initiate */
110#define ANOMALY_05000378 (__SILICON_REVISION__ < 2) 60#define ANOMALY_05000378 (__SILICON_REVISION__ < 2)
111/* 16-Bit NAND FLASH Boot Mode Is Not Functional */ 61/* 16-Bit NAND FLASH Boot Mode Is Not Functional */
112#define ANOMALY_05000379 (1) 62#define ANOMALY_05000379 (1)
113/* 8-Bit NAND Flash Boot Mode Not Functional */
114#define ANOMALY_05000382 (__SILICON_REVISION__ < 1)
115/* Some ATAPI Modes Are Not Functional */
116#define ANOMALY_05000383 (1)
117/* Boot from OTP Memory Not Functional */
118#define ANOMALY_05000385 (__SILICON_REVISION__ < 1)
119/* bfrom_SysControl() Firmware Routine Not Functional */
120#define ANOMALY_05000386 (__SILICON_REVISION__ < 1)
121/* Programmable Preboot Settings Not Functional */
122#define ANOMALY_05000387 (__SILICON_REVISION__ < 1)
123/* CRC32 Checksum Support Not Functional */
124#define ANOMALY_05000388 (__SILICON_REVISION__ < 1)
125/* Reset Vector Must Not Be in SDRAM Memory Space */
126#define ANOMALY_05000389 (__SILICON_REVISION__ < 1)
127/* Changed Meaning of BCODE Field in SYSCR Register */
128#define ANOMALY_05000390 (__SILICON_REVISION__ < 1)
129/* Repeated Boot from Page-Mode or Burst-Mode Flash Memory May Fail */
130#define ANOMALY_05000391 (__SILICON_REVISION__ < 1)
131/* pTempCurrent Not Present in ADI_BOOT_DATA Structure */
132#define ANOMALY_05000392 (__SILICON_REVISION__ < 1)
133/* Deprecated Value of dTempByteCount in ADI_BOOT_DATA Structure */
134#define ANOMALY_05000393 (__SILICON_REVISION__ < 1)
135/* Log Buffer Not Functional */
136#define ANOMALY_05000394 (__SILICON_REVISION__ < 1)
137/* Hook Routine Not Functional */
138#define ANOMALY_05000395 (__SILICON_REVISION__ < 1)
139/* Header Indirect Bit Not Functional */
140#define ANOMALY_05000396 (__SILICON_REVISION__ < 1)
141/* BK_ONES, BK_ZEROS, and BK_DATECODE Constants Not Functional */
142#define ANOMALY_05000397 (__SILICON_REVISION__ < 1)
143/* Lockbox SESR Disallows Certain User Interrupts */ 63/* Lockbox SESR Disallows Certain User Interrupts */
144#define ANOMALY_05000404 (__SILICON_REVISION__ < 2) 64#define ANOMALY_05000404 (__SILICON_REVISION__ < 2)
145/* Lockbox SESR Firmware Does Not Save/Restore Full Context */ 65/* Lockbox SESR Firmware Does Not Save/Restore Full Context */
@@ -161,7 +81,7 @@
161/* Speculative Fetches Can Cause Undesired External FIFO Operations */ 81/* Speculative Fetches Can Cause Undesired External FIFO Operations */
162#define ANOMALY_05000416 (1) 82#define ANOMALY_05000416 (1)
163/* Multichannel SPORT Channel Misalignment Under Specific Configuration */ 83/* Multichannel SPORT Channel Misalignment Under Specific Configuration */
164#define ANOMALY_05000425 (1) 84#define ANOMALY_05000425 (__SILICON_REVISION__ < 4)
165/* Speculative Fetches of Indirect-Pointer Instructions Can Cause False Hardware Errors */ 85/* Speculative Fetches of Indirect-Pointer Instructions Can Cause False Hardware Errors */
166#define ANOMALY_05000426 (1) 86#define ANOMALY_05000426 (1)
167/* CORE_EPPI_PRIO bit and SYS_EPPI_PRIO bit in the HMDMA1_CONTROL register are not functional */ 87/* CORE_EPPI_PRIO bit and SYS_EPPI_PRIO bit in the HMDMA1_CONTROL register are not functional */
@@ -174,8 +94,6 @@
174#define ANOMALY_05000431 (__SILICON_REVISION__ < 3) 94#define ANOMALY_05000431 (__SILICON_REVISION__ < 3)
175/* SW Breakpoints Ignored Upon Return From Lockbox Authentication */ 95/* SW Breakpoints Ignored Upon Return From Lockbox Authentication */
176#define ANOMALY_05000434 (1) 96#define ANOMALY_05000434 (1)
177/* OTP Write Accesses Not Supported */
178#define ANOMALY_05000442 (__SILICON_REVISION__ < 1)
179/* IFLUSH Instruction at End of Hardware Loop Causes Infinite Stall */ 97/* IFLUSH Instruction at End of Hardware Loop Causes Infinite Stall */
180#define ANOMALY_05000443 (1) 98#define ANOMALY_05000443 (1)
181/* CDMAPRIO and L2DMAPRIO Bits in the SYSCR Register Are Not Functional */ 99/* CDMAPRIO and L2DMAPRIO Bits in the SYSCR Register Are Not Functional */
@@ -186,34 +104,32 @@
186#define ANOMALY_05000448 (__SILICON_REVISION__ == 1) 104#define ANOMALY_05000448 (__SILICON_REVISION__ == 1)
187/* Reduced Timing Margins on DDR Output Setup and Hold (tDS and tDH) */ 105/* Reduced Timing Margins on DDR Output Setup and Hold (tDS and tDH) */
188#define ANOMALY_05000449 (__SILICON_REVISION__ == 1) 106#define ANOMALY_05000449 (__SILICON_REVISION__ == 1)
189/* USB DMA Mode 1 Short Packet Data Corruption */ 107/* USB DMA Short Packet Data Corruption */
190#define ANOMALY_05000450 (1) 108#define ANOMALY_05000450 (1)
191/* Incorrect Default Hysteresis Setting for RESET, NMI, and BMODE Signals */
192#define ANOMALY_05000452 (__SILICON_REVISION__ < 1)
193/* USB Receive Interrupt Is Not Generated in DMA Mode 1 */ 109/* USB Receive Interrupt Is Not Generated in DMA Mode 1 */
194#define ANOMALY_05000456 (1) 110#define ANOMALY_05000456 (1)
195/* Host DMA Port Responds to Certain Bus Activity Without HOST_CE Assertion */ 111/* Host DMA Port Responds to Certain Bus Activity Without HOST_CE Assertion */
196#define ANOMALY_05000457 (1) 112#define ANOMALY_05000457 (1)
197/* USB DMA Mode 1 Failure When Multiple USB DMA Channels Are Concurrently Enabled */ 113/* USB DMA Mode 1 Failure When Multiple USB DMA Channels Are Concurrently Enabled */
198#define ANOMALY_05000460 (1) 114#define ANOMALY_05000460 (__SILICON_REVISION__ < 4)
199/* False Hardware Error when RETI Points to Invalid Memory */ 115/* False Hardware Error when RETI Points to Invalid Memory */
200#define ANOMALY_05000461 (1) 116#define ANOMALY_05000461 (1)
201/* Synchronization Problem at Startup May Cause SPORT Transmit Channels to Misalign */ 117/* Synchronization Problem at Startup May Cause SPORT Transmit Channels to Misalign */
202#define ANOMALY_05000462 (1) 118#define ANOMALY_05000462 (__SILICON_REVISION__ < 4)
203/* USB DMA RX Data Corruption */ 119/* USB DMA RX Data Corruption */
204#define ANOMALY_05000463 (1) 120#define ANOMALY_05000463 (__SILICON_REVISION__ < 4)
205/* USB TX DMA Hang */ 121/* USB TX DMA Hang */
206#define ANOMALY_05000464 (1) 122#define ANOMALY_05000464 (__SILICON_REVISION__ < 4)
207/* USB Rx DMA hang */ 123/* USB Rx DMA Hang */
208#define ANOMALY_05000465 (1) 124#define ANOMALY_05000465 (1)
209/* TxPktRdy Bit Not Set for Transmit Endpoint When Core and DMA Access USB Endpoint FIFOs Simultaneously */ 125/* TxPktRdy Bit Not Set for Transmit Endpoint When Core and DMA Access USB Endpoint FIFOs Simultaneously */
210#define ANOMALY_05000466 (1) 126#define ANOMALY_05000466 (__SILICON_REVISION__ < 4)
211/* Possible RX data corruption when control & data EP FIFOs are accessed via the core */ 127/* Possible USB RX Data Corruption When Control & Data EP FIFOs are Accessed via the Core */
212#define ANOMALY_05000467 (1) 128#define ANOMALY_05000467 (__SILICON_REVISION__ < 4)
213/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */ 129/* Interrupted SPORT Receive Data Register Read Results In Underflow when SLEN > 15 */
214#define ANOMALY_05000473 (1) 130#define ANOMALY_05000473 (1)
215/* Access to DDR-SDRAM causes system hang under certain PLL/VR settings */ 131/* Access to DDR SDRAM Causes System Hang with Certain PLL Settings */
216#define ANOMALY_05000474 (1) 132#define ANOMALY_05000474 (__SILICON_REVISION__ < 4)
217/* TESTSET Instruction Cannot Be Interrupted */ 133/* TESTSET Instruction Cannot Be Interrupted */
218#define ANOMALY_05000477 (1) 134#define ANOMALY_05000477 (1)
219/* Reads of ITEST_COMMAND and ITEST_DATA Registers Cause Cache Corruption */ 135/* Reads of ITEST_COMMAND and ITEST_DATA Registers Cause Cache Corruption */
@@ -223,9 +139,111 @@
223/* DDR Trim May Not Be Performed for Certain VLEV Values in OTP Page PBS00L */ 139/* DDR Trim May Not Be Performed for Certain VLEV Values in OTP Page PBS00L */
224#define ANOMALY_05000484 (__SILICON_REVISION__ < 3) 140#define ANOMALY_05000484 (__SILICON_REVISION__ < 3)
225/* PLL_CTL Change Using bfrom_SysControl() Can Result in Processor Overclocking */ 141/* PLL_CTL Change Using bfrom_SysControl() Can Result in Processor Overclocking */
226#define ANOMALY_05000485 (__SILICON_REVISION__ >= 2) 142#define ANOMALY_05000485 (__SILICON_REVISION__ > 1 && __SILICON_REVISION__ < 4)
227/* IFLUSH sucks at life */ 143/* PLL May Latch Incorrect Values Coming Out of Reset */
144#define ANOMALY_05000489 (1)
145/* SPI Master Boot Can Fail Under Certain Conditions */
146#define ANOMALY_05000490 (1)
147/* Instruction Memory Stalls Can Cause IFLUSH to Fail */
228#define ANOMALY_05000491 (1) 148#define ANOMALY_05000491 (1)
149/* EXCPT Instruction May Be Lost If NMI Happens Simultaneously */
150#define ANOMALY_05000494 (1)
151/* CNT_COMMAND Functionality Depends on CNT_IMASK Configuration */
152#define ANOMALY_05000498 (1)
153/* Nand Flash Controller Hangs When the AMC Requests the Async Pins During the last 16 Bytes of a Page Write Operation. */
154#define ANOMALY_05000500 (1)
155/* RXS Bit in SPI_STAT May Become Stuck In RX DMA Modes */
156#define ANOMALY_05000501 (1)
157/* Async Memory Writes May Be Skipped When Using Odd Clock Ratios */
158#define ANOMALY_05000502 (1)
159
160/*
161 * These anomalies have been "phased" out of analog.com anomaly sheets and are
162 * here to show running on older silicon just isn't feasible.
163 */
164
165/* False Hardware Error when ISR Context Is Not Restored */
166#define ANOMALY_05000281 (__SILICON_REVISION__ < 1)
167/* SSYNCs After Writes To CAN/DMA MMR Registers Are Not Always Handled Correctly */
168#define ANOMALY_05000304 (__SILICON_REVISION__ < 1)
169/* Errors when SSYNC, CSYNC, or Loads to LT, LB and LC Registers Are Interrupted */
170#define ANOMALY_05000312 (__SILICON_REVISION__ < 1)
171/* TWI Slave Boot Mode Is Not Functional */
172#define ANOMALY_05000324 (__SILICON_REVISION__ < 1)
173/* Data Lost When Core and DMA Accesses Are Made to the USB FIFO Simultaneously */
174#define ANOMALY_05000327 (__SILICON_REVISION__ < 1)
175/* Incorrect Access of OTP_STATUS During otp_write() Function */
176#define ANOMALY_05000328 (__SILICON_REVISION__ < 1)
177/* Synchronous Burst Flash Boot Mode Is Not Functional */
178#define ANOMALY_05000329 (__SILICON_REVISION__ < 1)
179/* Host DMA Boot Modes Are Not Functional */
180#define ANOMALY_05000330 (__SILICON_REVISION__ < 1)
181/* Inadequate Timing Margins on DDR DQS to DQ and DQM Skew */
182#define ANOMALY_05000334 (__SILICON_REVISION__ < 1)
183/* Inadequate Rotary Debounce Logic Duration */
184#define ANOMALY_05000335 (__SILICON_REVISION__ < 1)
185/* Phantom Interrupt Occurs After First Configuration of Host DMA Port */
186#define ANOMALY_05000336 (__SILICON_REVISION__ < 1)
187/* Disallowed Configuration Prevents Subsequent Allowed Configuration on Host DMA Port */
188#define ANOMALY_05000337 (__SILICON_REVISION__ < 1)
189/* Slave-Mode SPI0 MISO Failure With CPHA = 0 */
190#define ANOMALY_05000338 (__SILICON_REVISION__ < 1)
191/* If Memory Reads Are Enabled on SDH or HOSTDP, Other DMAC1 Peripherals Cannot Read */
192#define ANOMALY_05000340 (__SILICON_REVISION__ < 1)
193/* Boot Host Wait (HWAIT) and Boot Host Wait Alternate (HWAITA) Signals Are Swapped */
194#define ANOMALY_05000344 (__SILICON_REVISION__ < 1)
195/* USB Calibration Value Is Not Initialized */
196#define ANOMALY_05000346 (__SILICON_REVISION__ < 1)
197/* USB Calibration Value to use */
198#define ANOMALY_05000346_value 0x5411
199/* Preboot Routine Incorrectly Alters Reset Value of USB Register */
200#define ANOMALY_05000347 (__SILICON_REVISION__ < 1)
201/* Data Lost when Core Reads SDH Data FIFO */
202#define ANOMALY_05000349 (__SILICON_REVISION__ < 1)
203/* PLL Status Register Is Inaccurate */
204#define ANOMALY_05000351 (__SILICON_REVISION__ < 1)
205/* Regulator Programming Blocked when Hibernate Wakeup Source Remains Active */
206#define ANOMALY_05000355 (__SILICON_REVISION__ < 1)
207/* System Stalled During A Core Access To AMC While A Core Access To NFC FIFO Is Required */
208#define ANOMALY_05000356 (__SILICON_REVISION__ < 1)
209/* WURESET Bit In SYSCR Register Does Not Properly Indicate Hibernate Wake-Up */
210#define ANOMALY_05000367 (__SILICON_REVISION__ < 1)
211/* Default PLL MSEL and SSEL Settings Can Cause 400MHz Product To Violate Specifications */
212#define ANOMALY_05000370 (__SILICON_REVISION__ < 1)
213/* USB DP/DM Data Pins May Lose State When Entering Hibernate */
214#define ANOMALY_05000372 (__SILICON_REVISION__ < 1)
215/* 8-Bit NAND Flash Boot Mode Not Functional */
216#define ANOMALY_05000382 (__SILICON_REVISION__ < 1)
217/* Boot from OTP Memory Not Functional */
218#define ANOMALY_05000385 (__SILICON_REVISION__ < 1)
219/* bfrom_SysControl() Firmware Routine Not Functional */
220#define ANOMALY_05000386 (__SILICON_REVISION__ < 1)
221/* Programmable Preboot Settings Not Functional */
222#define ANOMALY_05000387 (__SILICON_REVISION__ < 1)
223/* CRC32 Checksum Support Not Functional */
224#define ANOMALY_05000388 (__SILICON_REVISION__ < 1)
225/* Reset Vector Must Not Be in SDRAM Memory Space */
226#define ANOMALY_05000389 (__SILICON_REVISION__ < 1)
227/* Changed Meaning of BCODE Field in SYSCR Register */
228#define ANOMALY_05000390 (__SILICON_REVISION__ < 1)
229/* Repeated Boot from Page-Mode or Burst-Mode Flash Memory May Fail */
230#define ANOMALY_05000391 (__SILICON_REVISION__ < 1)
231/* pTempCurrent Not Present in ADI_BOOT_DATA Structure */
232#define ANOMALY_05000392 (__SILICON_REVISION__ < 1)
233/* Deprecated Value of dTempByteCount in ADI_BOOT_DATA Structure */
234#define ANOMALY_05000393 (__SILICON_REVISION__ < 1)
235/* Log Buffer Not Functional */
236#define ANOMALY_05000394 (__SILICON_REVISION__ < 1)
237/* Hook Routine Not Functional */
238#define ANOMALY_05000395 (__SILICON_REVISION__ < 1)
239/* Header Indirect Bit Not Functional */
240#define ANOMALY_05000396 (__SILICON_REVISION__ < 1)
241/* BK_ONES, BK_ZEROS, and BK_DATECODE Constants Not Functional */
242#define ANOMALY_05000397 (__SILICON_REVISION__ < 1)
243/* OTP Write Accesses Not Supported */
244#define ANOMALY_05000442 (__SILICON_REVISION__ < 1)
245/* Incorrect Default Hysteresis Setting for RESET, NMI, and BMODE Signals */
246#define ANOMALY_05000452 (__SILICON_REVISION__ < 1)
229 247
230/* Anomalies that don't exist on this proc */ 248/* Anomalies that don't exist on this proc */
231#define ANOMALY_05000099 (0) 249#define ANOMALY_05000099 (0)
diff --git a/arch/blackfin/mach-bf548/include/mach/gpio.h b/arch/blackfin/mach-bf548/include/mach/gpio.h
index 7db433514e3f..35c8ced46158 100644
--- a/arch/blackfin/mach-bf548/include/mach/gpio.h
+++ b/arch/blackfin/mach-bf548/include/mach/gpio.h
@@ -170,6 +170,8 @@
170 170
171#define MAX_BLACKFIN_GPIOS 160 171#define MAX_BLACKFIN_GPIOS 160
172 172
173#define BFIN_GPIO_PINT 1
174
173#ifndef __ASSEMBLY__ 175#ifndef __ASSEMBLY__
174 176
175struct gpio_port_t { 177struct gpio_port_t {
diff --git a/arch/blackfin/mach-bf548/include/mach/irq.h b/arch/blackfin/mach-bf548/include/mach/irq.h
index 533b8095b540..10dc142c518d 100644
--- a/arch/blackfin/mach-bf548/include/mach/irq.h
+++ b/arch/blackfin/mach-bf548/include/mach/irq.h
@@ -438,7 +438,7 @@
438struct bfin_pint_regs { 438struct bfin_pint_regs {
439 u32 mask_set; 439 u32 mask_set;
440 u32 mask_clear; 440 u32 mask_clear;
441 u32 irq; 441 u32 request;
442 u32 assign; 442 u32 assign;
443 u32 edge_set; 443 u32 edge_set;
444 u32 edge_clear; 444 u32 edge_clear;
diff --git a/arch/blackfin/mach-bf561/boards/acvilon.c b/arch/blackfin/mach-bf561/boards/acvilon.c
index 9231a942892b..972e1347c6bc 100644
--- a/arch/blackfin/mach-bf561/boards/acvilon.c
+++ b/arch/blackfin/mach-bf561/boards/acvilon.c
@@ -364,14 +364,6 @@ static struct flash_platform_data bfin_spi_dataflash_data = {
364/* DataFlash chip */ 364/* DataFlash chip */
365static struct bfin5xx_spi_chip data_flash_chip_info = { 365static struct bfin5xx_spi_chip data_flash_chip_info = {
366 .enable_dma = 0, /* use dma transfer with this chip */ 366 .enable_dma = 0, /* use dma transfer with this chip */
367 .bits_per_word = 8,
368};
369#endif
370
371#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
372static struct bfin5xx_spi_chip spidev_chip_info = {
373 .enable_dma = 0,
374 .bits_per_word = 8,
375}; 367};
376#endif 368#endif
377 369
@@ -420,7 +412,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
420 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 412 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
421 .bus_num = 0, 413 .bus_num = 0,
422 .chip_select = 3, 414 .chip_select = 3,
423 .controller_data = &spidev_chip_info,
424 }, 415 },
425#endif 416#endif
426#if defined(CONFIG_MTD_DATAFLASH) || defined(CONFIG_MTD_DATAFLASH_MODULE) 417#if defined(CONFIG_MTD_DATAFLASH) || defined(CONFIG_MTD_DATAFLASH_MODULE)
diff --git a/arch/blackfin/mach-bf561/boards/cm_bf561.c b/arch/blackfin/mach-bf561/boards/cm_bf561.c
index 87595cd38afe..e4f397d1d65b 100644
--- a/arch/blackfin/mach-bf561/boards/cm_bf561.c
+++ b/arch/blackfin/mach-bf561/boards/cm_bf561.c
@@ -60,29 +60,6 @@ static struct flash_platform_data bfin_spi_flash_data = {
60/* SPI flash chip (m25p64) */ 60/* SPI flash chip (m25p64) */
61static struct bfin5xx_spi_chip spi_flash_chip_info = { 61static struct bfin5xx_spi_chip spi_flash_chip_info = {
62 .enable_dma = 0, /* use dma transfer with this chip*/ 62 .enable_dma = 0, /* use dma transfer with this chip*/
63 .bits_per_word = 8,
64};
65#endif
66
67#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
68/* SPI ADC chip */
69static struct bfin5xx_spi_chip spi_adc_chip_info = {
70 .enable_dma = 1, /* use dma transfer with this chip*/
71 .bits_per_word = 16,
72};
73#endif
74
75#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
76static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
77 .enable_dma = 0,
78 .bits_per_word = 16,
79};
80#endif
81
82#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
83static struct bfin5xx_spi_chip mmc_spi_chip_info = {
84 .enable_dma = 0,
85 .bits_per_word = 8,
86}; 63};
87#endif 64#endif
88 65
@@ -100,24 +77,12 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
100 }, 77 },
101#endif 78#endif
102 79
103#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
104 {
105 .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
106 .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
107 .bus_num = 0, /* Framework bus number */
108 .chip_select = 1, /* Framework chip select. */
109 .platform_data = NULL, /* No spi_driver specific config */
110 .controller_data = &spi_adc_chip_info,
111 },
112#endif
113
114#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE) 80#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
115 { 81 {
116 .modalias = "ad183x", 82 .modalias = "ad183x",
117 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 83 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
118 .bus_num = 0, 84 .bus_num = 0,
119 .chip_select = 4, 85 .chip_select = 4,
120 .controller_data = &ad1836_spi_chip_info,
121 }, 86 },
122#endif 87#endif
123#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) 88#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
@@ -126,7 +91,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
126 .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */ 91 .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
127 .bus_num = 0, 92 .bus_num = 0,
128 .chip_select = 1, 93 .chip_select = 1,
129 .controller_data = &mmc_spi_chip_info,
130 .mode = SPI_MODE_3, 94 .mode = SPI_MODE_3,
131 }, 95 },
132#endif 96#endif
@@ -532,6 +496,24 @@ static struct platform_device *cm_bf561_devices[] __initdata = {
532#endif 496#endif
533}; 497};
534 498
499static int __init net2272_init(void)
500{
501#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
502 int ret;
503
504 ret = gpio_request(GPIO_PF46, "net2272");
505 if (ret)
506 return ret;
507
508 /* Reset USB Chip, PF46 */
509 gpio_direction_output(GPIO_PF46, 0);
510 mdelay(2);
511 gpio_set_value(GPIO_PF46, 1);
512#endif
513
514 return 0;
515}
516
535static int __init cm_bf561_init(void) 517static int __init cm_bf561_init(void)
536{ 518{
537 printk(KERN_INFO "%s(): registering device resources\n", __func__); 519 printk(KERN_INFO "%s(): registering device resources\n", __func__);
@@ -543,6 +525,10 @@ static int __init cm_bf561_init(void)
543#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE) 525#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
544 irq_set_status_flags(PATA_INT, IRQ_NOAUTOEN); 526 irq_set_status_flags(PATA_INT, IRQ_NOAUTOEN);
545#endif 527#endif
528
529 if (net2272_init())
530 pr_warning("unable to configure net2272; it probably won't work\n");
531
546 return 0; 532 return 0;
547} 533}
548 534
diff --git a/arch/blackfin/mach-bf561/boards/ezkit.c b/arch/blackfin/mach-bf561/boards/ezkit.c
index 5067984a62e7..9490dc800ca5 100644
--- a/arch/blackfin/mach-bf561/boards/ezkit.c
+++ b/arch/blackfin/mach-bf561/boards/ezkit.c
@@ -108,6 +108,9 @@ static struct resource net2272_bfin_resources[] = {
108 .end = 0x2C000000 + 0x7F, 108 .end = 0x2C000000 + 0x7F,
109 .flags = IORESOURCE_MEM, 109 .flags = IORESOURCE_MEM,
110 }, { 110 }, {
111 .start = 1,
112 .flags = IORESOURCE_BUS,
113 }, {
111 .start = IRQ_PF10, 114 .start = IRQ_PF10,
112 .end = IRQ_PF10, 115 .end = IRQ_PF10,
113 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL, 116 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
@@ -283,21 +286,6 @@ static struct platform_device ezkit_flash_device = {
283}; 286};
284#endif 287#endif
285 288
286#if defined(CONFIG_SND_BF5XX_SOC_AD183X) \
287 || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
288static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
289 .enable_dma = 0,
290 .bits_per_word = 16,
291};
292#endif
293
294#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
295static struct bfin5xx_spi_chip spidev_chip_info = {
296 .enable_dma = 0,
297 .bits_per_word = 8,
298};
299#endif
300
301#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE) 289#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
302/* SPI (0) */ 290/* SPI (0) */
303static struct resource bfin_spi0_resource[] = { 291static struct resource bfin_spi0_resource[] = {
@@ -345,7 +333,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
345 .bus_num = 0, 333 .bus_num = 0,
346 .chip_select = 4, 334 .chip_select = 4,
347 .platform_data = "ad1836", /* only includes chip name for the moment */ 335 .platform_data = "ad1836", /* only includes chip name for the moment */
348 .controller_data = &ad1836_spi_chip_info,
349 .mode = SPI_MODE_3, 336 .mode = SPI_MODE_3,
350 }, 337 },
351#endif 338#endif
@@ -355,7 +342,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
355 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 342 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
356 .bus_num = 0, 343 .bus_num = 0,
357 .chip_select = 1, 344 .chip_select = 1,
358 .controller_data = &spidev_chip_info,
359 }, 345 },
360#endif 346#endif
361}; 347};
@@ -516,6 +502,24 @@ static struct platform_device *ezkit_devices[] __initdata = {
516#endif 502#endif
517}; 503};
518 504
505static int __init net2272_init(void)
506{
507#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
508 int ret;
509
510 ret = gpio_request(GPIO_PF11, "net2272");
511 if (ret)
512 return ret;
513
514 /* Reset the USB chip */
515 gpio_direction_output(GPIO_PF11, 0);
516 mdelay(2);
517 gpio_set_value(GPIO_PF11, 1);
518#endif
519
520 return 0;
521}
522
519static int __init ezkit_init(void) 523static int __init ezkit_init(void)
520{ 524{
521 int ret; 525 int ret;
@@ -542,6 +546,9 @@ static int __init ezkit_init(void)
542 udelay(400); 546 udelay(400);
543#endif 547#endif
544 548
549 if (net2272_init())
550 pr_warning("unable to configure net2272; it probably won't work\n");
551
545 spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info)); 552 spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
546 return 0; 553 return 0;
547} 554}
diff --git a/arch/blackfin/mach-bf561/include/mach/anomaly.h b/arch/blackfin/mach-bf561/include/mach/anomaly.h
index 22b5ab773027..836baeed303a 100644
--- a/arch/blackfin/mach-bf561/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf561/include/mach/anomaly.h
@@ -11,7 +11,7 @@
11 */ 11 */
12 12
13/* This file should be up to date with: 13/* This file should be up to date with:
14 * - Revision R, 05/25/2010; ADSP-BF561 Blackfin Processor Anomaly List 14 * - Revision S, 05/23/2011; ADSP-BF561 Blackfin Processor Anomaly List
15 */ 15 */
16 16
17#ifndef _MACH_ANOMALY_H_ 17#ifndef _MACH_ANOMALY_H_
@@ -26,62 +26,16 @@
26#define ANOMALY_05000074 (1) 26#define ANOMALY_05000074 (1)
27/* UART Line Status Register (UART_LSR) Bits Are Not Updated at the Same Time */ 27/* UART Line Status Register (UART_LSR) Bits Are Not Updated at the Same Time */
28#define ANOMALY_05000099 (__SILICON_REVISION__ < 5) 28#define ANOMALY_05000099 (__SILICON_REVISION__ < 5)
29/* Trace Buffers May Contain Errors in Emulation Mode and/or Exception, NMI, Reset Handlers */
30#define ANOMALY_05000116 (__SILICON_REVISION__ < 3)
31/* TESTSET Instructions Restricted to 32-Bit Aligned Memory Locations */ 29/* TESTSET Instructions Restricted to 32-Bit Aligned Memory Locations */
32#define ANOMALY_05000120 (1) 30#define ANOMALY_05000120 (1)
33/* Rx.H Cannot Be Used to Access 16-bit System MMR Registers */ 31/* Rx.H Cannot Be Used to Access 16-bit System MMR Registers */
34#define ANOMALY_05000122 (1) 32#define ANOMALY_05000122 (1)
35/* Erroneous Exception when Enabling Cache */
36#define ANOMALY_05000125 (__SILICON_REVISION__ < 3)
37/* SIGNBITS Instruction Not Functional under Certain Conditions */ 33/* SIGNBITS Instruction Not Functional under Certain Conditions */
38#define ANOMALY_05000127 (1) 34#define ANOMALY_05000127 (1)
39/* Two bits in the Watchpoint Status Register (WPSTAT) are swapped */
40#define ANOMALY_05000134 (__SILICON_REVISION__ < 3)
41/* Enable wires from the Data Watchpoint Address Control Register (WPDACTL) are swapped */
42#define ANOMALY_05000135 (__SILICON_REVISION__ < 3)
43/* Stall in multi-unit DMA operations */
44#define ANOMALY_05000136 (__SILICON_REVISION__ < 3)
45/* Allowing the SPORT RX FIFO to fill will cause an overflow */
46#define ANOMALY_05000140 (__SILICON_REVISION__ < 3)
47/* Infinite Stall may occur with a particular sequence of consecutive dual dag events */
48#define ANOMALY_05000141 (__SILICON_REVISION__ < 3)
49/* Interrupts may be lost when a programmable input flag is configured to be edge sensitive */
50#define ANOMALY_05000142 (__SILICON_REVISION__ < 3)
51/* DMA and TESTSET conflict when both are accessing external memory */
52#define ANOMALY_05000144 (__SILICON_REVISION__ < 3)
53/* In PWM_OUT mode, you must enable the PPI block to generate a waveform from PPI_CLK */
54#define ANOMALY_05000145 (__SILICON_REVISION__ < 3)
55/* MDMA may lose the first few words of a descriptor chain */
56#define ANOMALY_05000146 (__SILICON_REVISION__ < 3)
57/* Source MDMA descriptor may stop with a DMA Error near beginning of descriptor fetch */
58#define ANOMALY_05000147 (__SILICON_REVISION__ < 3)
59/* IMDMA S1/D1 Channel May Stall */ 35/* IMDMA S1/D1 Channel May Stall */
60#define ANOMALY_05000149 (1) 36#define ANOMALY_05000149 (1)
61/* DMA engine may lose data due to incorrect handshaking */
62#define ANOMALY_05000150 (__SILICON_REVISION__ < 3)
63/* DMA stalls when all three controllers read data from the same source */
64#define ANOMALY_05000151 (__SILICON_REVISION__ < 3)
65/* Execution stall when executing in L2 and doing external accesses */
66#define ANOMALY_05000152 (__SILICON_REVISION__ < 3)
67/* Frame Delay in SPORT Multichannel Mode */
68#define ANOMALY_05000153 (__SILICON_REVISION__ < 3)
69/* SPORT TFS signal stays active in multichannel mode outside of valid channels */
70#define ANOMALY_05000154 (__SILICON_REVISION__ < 3)
71/* Timers in PWM-Out Mode with PPI GP Receive (Input) Mode with 0 Frame Syncs */ 37/* Timers in PWM-Out Mode with PPI GP Receive (Input) Mode with 0 Frame Syncs */
72#define ANOMALY_05000156 (__SILICON_REVISION__ < 4) 38#define ANOMALY_05000156 (__SILICON_REVISION__ < 4)
73/* Killed 32-Bit MMR Write Leads to Next System MMR Access Thinking It Should Be 32-Bit */
74#define ANOMALY_05000157 (__SILICON_REVISION__ < 3)
75/* DMA Lock-up at CCLK to SCLK ratios of 4:1, 2:1, or 1:1 */
76#define ANOMALY_05000159 (__SILICON_REVISION__ < 3)
77/* A read from external memory may return a wrong value with data cache enabled */
78#define ANOMALY_05000160 (__SILICON_REVISION__ < 3)
79/* Data Cache Fill data can be corrupted after/during Instruction DMA if certain core stalls exist */
80#define ANOMALY_05000161 (__SILICON_REVISION__ < 3)
81/* DMEM_CONTROL<12> is not set on Reset */
82#define ANOMALY_05000162 (__SILICON_REVISION__ < 3)
83/* SPORT Transmit Data Is Not Gated by External Frame Sync in Certain Conditions */
84#define ANOMALY_05000163 (__SILICON_REVISION__ < 3)
85/* PPI Data Lengths between 8 and 16 Do Not Zero Out Upper Bits */ 39/* PPI Data Lengths between 8 and 16 Do Not Zero Out Upper Bits */
86#define ANOMALY_05000166 (1) 40#define ANOMALY_05000166 (1)
87/* Turning SPORTs on while External Frame Sync Is Active May Corrupt Data */ 41/* Turning SPORTs on while External Frame Sync Is Active May Corrupt Data */
@@ -92,10 +46,6 @@
92#define ANOMALY_05000169 (__SILICON_REVISION__ < 5) 46#define ANOMALY_05000169 (__SILICON_REVISION__ < 5)
93/* Boot-ROM Modifies SICA_IWRx Wakeup Registers */ 47/* Boot-ROM Modifies SICA_IWRx Wakeup Registers */
94#define ANOMALY_05000171 (__SILICON_REVISION__ < 5) 48#define ANOMALY_05000171 (__SILICON_REVISION__ < 5)
95/* DSPID register values incorrect */
96#define ANOMALY_05000172 (__SILICON_REVISION__ < 3)
97/* DMA vs Core accesses to external memory */
98#define ANOMALY_05000173 (__SILICON_REVISION__ < 3)
99/* Cache Fill Buffer Data lost */ 49/* Cache Fill Buffer Data lost */
100#define ANOMALY_05000174 (__SILICON_REVISION__ < 5) 50#define ANOMALY_05000174 (__SILICON_REVISION__ < 5)
101/* Overlapping Sequencer and Memory Stalls */ 51/* Overlapping Sequencer and Memory Stalls */
@@ -124,8 +74,6 @@
124#define ANOMALY_05000189 (__SILICON_REVISION__ < 5) 74#define ANOMALY_05000189 (__SILICON_REVISION__ < 5)
125/* PPI Not Functional at Core Voltage < 1Volt */ 75/* PPI Not Functional at Core Voltage < 1Volt */
126#define ANOMALY_05000190 (1) 76#define ANOMALY_05000190 (1)
127/* PPI does not invert the Driving PPICLK edge in Transmit Modes */
128#define ANOMALY_05000191 (__SILICON_REVISION__ < 3)
129/* False I/O Pin Interrupts on Edge-Sensitive Inputs When Polarity Setting Is Changed */ 77/* False I/O Pin Interrupts on Edge-Sensitive Inputs When Polarity Setting Is Changed */
130#define ANOMALY_05000193 (__SILICON_REVISION__ < 5) 78#define ANOMALY_05000193 (__SILICON_REVISION__ < 5)
131/* Restarting SPORT in Specific Modes May Cause Data Corruption */ 79/* Restarting SPORT in Specific Modes May Cause Data Corruption */
@@ -217,10 +165,10 @@
217/* Timing Requirements Change for External Frame Sync PPI Modes with Non-Zero PPI_DELAY */ 165/* Timing Requirements Change for External Frame Sync PPI Modes with Non-Zero PPI_DELAY */
218#define ANOMALY_05000276 (__SILICON_REVISION__ < 5) 166#define ANOMALY_05000276 (__SILICON_REVISION__ < 5)
219/* Writes to an I/O Data Register One SCLK Cycle after an Edge Is Detected May Clear Interrupt */ 167/* Writes to an I/O Data Register One SCLK Cycle after an Edge Is Detected May Clear Interrupt */
220#define ANOMALY_05000277 (__SILICON_REVISION__ < 3) 168#define ANOMALY_05000277 (__SILICON_REVISION__ < 5)
221/* Disabling Peripherals with DMA Running May Cause DMA System Instability */ 169/* Disabling Peripherals with DMA Running May Cause DMA System Instability */
222#define ANOMALY_05000278 (__SILICON_REVISION__ < 5) 170#define ANOMALY_05000278 (__SILICON_REVISION__ < 5)
223/* False Hardware Error Exception when ISR Context Is Not Restored */ 171/* False Hardware Error when ISR Context Is Not Restored */
224/* Temporarily walk around for bug 5423 till this issue is confirmed by 172/* Temporarily walk around for bug 5423 till this issue is confirmed by
225 * official anomaly document. It looks 05000281 still exists on bf561 173 * official anomaly document. It looks 05000281 still exists on bf561
226 * v0.5. 174 * v0.5.
@@ -274,8 +222,6 @@
274#define ANOMALY_05000366 (1) 222#define ANOMALY_05000366 (1)
275/* Possible RETS Register Corruption when Subroutine Is under 5 Cycles in Duration */ 223/* Possible RETS Register Corruption when Subroutine Is under 5 Cycles in Duration */
276#define ANOMALY_05000371 (1) 224#define ANOMALY_05000371 (1)
277/* SSYNC Stalls Processor when Executed from Non-Cacheable Memory */
278#define ANOMALY_05000402 (__SILICON_REVISION__ == 4)
279/* Level-Sensitive External GPIO Wakeups May Cause Indefinite Stall */ 225/* Level-Sensitive External GPIO Wakeups May Cause Indefinite Stall */
280#define ANOMALY_05000403 (1) 226#define ANOMALY_05000403 (1)
281/* TESTSET Instruction Causes Data Corruption with Writeback Data Cache Enabled */ 227/* TESTSET Instruction Causes Data Corruption with Writeback Data Cache Enabled */
@@ -298,16 +244,82 @@
298#define ANOMALY_05000462 (1) 244#define ANOMALY_05000462 (1)
299/* Boot Failure When SDRAM Control Signals Toggle Coming Out Of Reset */ 245/* Boot Failure When SDRAM Control Signals Toggle Coming Out Of Reset */
300#define ANOMALY_05000471 (1) 246#define ANOMALY_05000471 (1)
301/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */ 247/* Interrupted SPORT Receive Data Register Read Results In Underflow when SLEN > 15 */
302#define ANOMALY_05000473 (1) 248#define ANOMALY_05000473 (1)
303/* Possible Lockup Condition whem Modifying PLL from External Memory */ 249/* Possible Lockup Condition when Modifying PLL from External Memory */
304#define ANOMALY_05000475 (1) 250#define ANOMALY_05000475 (1)
305/* TESTSET Instruction Cannot Be Interrupted */ 251/* TESTSET Instruction Cannot Be Interrupted */
306#define ANOMALY_05000477 (1) 252#define ANOMALY_05000477 (1)
307/* Reads of ITEST_COMMAND and ITEST_DATA Registers Cause Cache Corruption */ 253/* Reads of ITEST_COMMAND and ITEST_DATA Registers Cause Cache Corruption */
308#define ANOMALY_05000481 (1) 254#define ANOMALY_05000481 (1)
309/* IFLUSH sucks at life */ 255/* PLL May Latch Incorrect Values Coming Out of Reset */
256#define ANOMALY_05000489 (1)
257/* Instruction Memory Stalls Can Cause IFLUSH to Fail */
310#define ANOMALY_05000491 (1) 258#define ANOMALY_05000491 (1)
259/* EXCPT Instruction May Be Lost If NMI Happens Simultaneously */
260#define ANOMALY_05000494 (1)
261/* RXS Bit in SPI_STAT May Become Stuck In RX DMA Modes */
262#define ANOMALY_05000501 (1)
263
264/*
265 * These anomalies have been "phased" out of analog.com anomaly sheets and are
266 * here to show running on older silicon just isn't feasible.
267 */
268
269/* Trace Buffers May Contain Errors in Emulation Mode and/or Exception, NMI, Reset Handlers */
270#define ANOMALY_05000116 (__SILICON_REVISION__ < 3)
271/* Erroneous Exception when Enabling Cache */
272#define ANOMALY_05000125 (__SILICON_REVISION__ < 3)
273/* Two bits in the Watchpoint Status Register (WPSTAT) are swapped */
274#define ANOMALY_05000134 (__SILICON_REVISION__ < 3)
275/* Enable wires from the Data Watchpoint Address Control Register (WPDACTL) are swapped */
276#define ANOMALY_05000135 (__SILICON_REVISION__ < 3)
277/* Stall in multi-unit DMA operations */
278#define ANOMALY_05000136 (__SILICON_REVISION__ < 3)
279/* Allowing the SPORT RX FIFO to fill will cause an overflow */
280#define ANOMALY_05000140 (__SILICON_REVISION__ < 3)
281/* Infinite Stall may occur with a particular sequence of consecutive dual dag events */
282#define ANOMALY_05000141 (__SILICON_REVISION__ < 3)
283/* Interrupts may be lost when a programmable input flag is configured to be edge sensitive */
284#define ANOMALY_05000142 (__SILICON_REVISION__ < 3)
285/* DMA and TESTSET conflict when both are accessing external memory */
286#define ANOMALY_05000144 (__SILICON_REVISION__ < 3)
287/* In PWM_OUT mode, you must enable the PPI block to generate a waveform from PPI_CLK */
288#define ANOMALY_05000145 (__SILICON_REVISION__ < 3)
289/* MDMA may lose the first few words of a descriptor chain */
290#define ANOMALY_05000146 (__SILICON_REVISION__ < 3)
291/* Source MDMA descriptor may stop with a DMA Error near beginning of descriptor fetch */
292#define ANOMALY_05000147 (__SILICON_REVISION__ < 3)
293/* DMA engine may lose data due to incorrect handshaking */
294#define ANOMALY_05000150 (__SILICON_REVISION__ < 3)
295/* DMA stalls when all three controllers read data from the same source */
296#define ANOMALY_05000151 (__SILICON_REVISION__ < 3)
297/* Execution stall when executing in L2 and doing external accesses */
298#define ANOMALY_05000152 (__SILICON_REVISION__ < 3)
299/* Frame Delay in SPORT Multichannel Mode */
300#define ANOMALY_05000153 (__SILICON_REVISION__ < 3)
301/* SPORT TFS signal stays active in multichannel mode outside of valid channels */
302#define ANOMALY_05000154 (__SILICON_REVISION__ < 3)
303/* Killed 32-Bit MMR Write Leads to Next System MMR Access Thinking It Should Be 32-Bit */
304#define ANOMALY_05000157 (__SILICON_REVISION__ < 3)
305/* DMA Lock-up at CCLK to SCLK ratios of 4:1, 2:1, or 1:1 */
306#define ANOMALY_05000159 (__SILICON_REVISION__ < 3)
307/* A read from external memory may return a wrong value with data cache enabled */
308#define ANOMALY_05000160 (__SILICON_REVISION__ < 3)
309/* Data Cache Fill data can be corrupted after/during Instruction DMA if certain core stalls exist */
310#define ANOMALY_05000161 (__SILICON_REVISION__ < 3)
311/* DMEM_CONTROL<12> is not set on Reset */
312#define ANOMALY_05000162 (__SILICON_REVISION__ < 3)
313/* SPORT Transmit Data Is Not Gated by External Frame Sync in Certain Conditions */
314#define ANOMALY_05000163 (__SILICON_REVISION__ < 3)
315/* DSPID register values incorrect */
316#define ANOMALY_05000172 (__SILICON_REVISION__ < 3)
317/* DMA vs Core accesses to external memory */
318#define ANOMALY_05000173 (__SILICON_REVISION__ < 3)
319/* PPI does not invert the Driving PPICLK edge in Transmit Modes */
320#define ANOMALY_05000191 (__SILICON_REVISION__ < 3)
321/* SSYNC Stalls Processor when Executed from Non-Cacheable Memory */
322#define ANOMALY_05000402 (__SILICON_REVISION__ == 4)
311 323
312/* Anomalies that don't exist on this proc */ 324/* Anomalies that don't exist on this proc */
313#define ANOMALY_05000119 (0) 325#define ANOMALY_05000119 (0)
diff --git a/arch/blackfin/mach-bf561/include/mach/gpio.h b/arch/blackfin/mach-bf561/include/mach/gpio.h
index 57d5eab59faf..f9f8b2adf4ba 100644
--- a/arch/blackfin/mach-bf561/include/mach/gpio.h
+++ b/arch/blackfin/mach-bf561/include/mach/gpio.h
@@ -58,9 +58,9 @@
58#define GPIO_PF46 46 58#define GPIO_PF46 46
59#define GPIO_PF47 47 59#define GPIO_PF47 47
60 60
61#define PORT_FIO0 GPIO_0 61#define PORT_FIO0 GPIO_PF0
62#define PORT_FIO1 GPIO_16 62#define PORT_FIO1 GPIO_PF16
63#define PORT_FIO2 GPIO_32 63#define PORT_FIO2 GPIO_PF32
64 64
65#include <mach-common/ports-f.h> 65#include <mach-common/ports-f.h>
66 66
diff --git a/arch/blackfin/mach-bf561/secondary.S b/arch/blackfin/mach-bf561/secondary.S
index 4c462838f4e1..01e5408620ac 100644
--- a/arch/blackfin/mach-bf561/secondary.S
+++ b/arch/blackfin/mach-bf561/secondary.S
@@ -23,108 +23,78 @@
23#define INITIAL_STACK (COREB_L1_SCRATCH_START + L1_SCRATCH_LENGTH - 12) 23#define INITIAL_STACK (COREB_L1_SCRATCH_START + L1_SCRATCH_LENGTH - 12)
24 24
25ENTRY(_coreb_trampoline_start) 25ENTRY(_coreb_trampoline_start)
26 /* Set the SYSCFG register */ 26 /* Enable Cycle Counter and Nesting Of Interrupts */
27 R0 = 0x36; 27#ifdef CONFIG_BFIN_SCRATCH_REG_CYCLES
28 SYSCFG = R0; /*Enable Cycle Counter and Nesting Of Interrupts(3rd Bit)*/ 28 R0 = SYSCFG_SNEN;
29 R0 = 0; 29#else
30 30 R0 = SYSCFG_SNEN | SYSCFG_CCEN;
31 /*Clear Out All the data and pointer Registers*/ 31#endif
32 R1 = R0; 32 SYSCFG = R0;
33 R2 = R0;
34 R3 = R0;
35 R4 = R0;
36 R5 = R0;
37 R6 = R0;
38 R7 = R0;
39
40 P0 = R0;
41 P1 = R0;
42 P2 = R0;
43 P3 = R0;
44 P4 = R0;
45 P5 = R0;
46
47 LC0 = r0;
48 LC1 = r0;
49 L0 = r0;
50 L1 = r0;
51 L2 = r0;
52 L3 = r0;
53
54 /* Clear Out All the DAG Registers*/
55 B0 = r0;
56 B1 = r0;
57 B2 = r0;
58 B3 = r0;
59
60 I0 = r0;
61 I1 = r0;
62 I2 = r0;
63 I3 = r0;
64
65 M0 = r0;
66 M1 = r0;
67 M2 = r0;
68 M3 = r0;
69 33
70 trace_buffer_init(p0,r0); 34 /* Optimization register tricks: keep a base value in the
35 * reserved P registers so we use the load/store with an
36 * offset syntax. R0 = [P5 + <constant>];
37 * P5 - core MMR base
38 * R6 - 0
39 */
40 r6 = 0;
41 p5.l = 0;
42 p5.h = hi(COREMMR_BASE);
71 43
72 /* Turn off the icache */ 44 /* Zero out registers required by Blackfin ABI */
73 p0.l = LO(IMEM_CONTROL);
74 p0.h = HI(IMEM_CONTROL);
75 R1 = [p0];
76 R0 = ~ENICPLB;
77 R0 = R0 & R1;
78 45
79 /* Disabling of CPLBs should be proceeded by a CSYNC */ 46 /* Disable circular buffers */
47 L0 = r6;
48 L1 = r6;
49 L2 = r6;
50 L3 = r6;
51
52 /* Disable hardware loops in case we were started by 'go' */
53 LC0 = r6;
54 LC1 = r6;
55
56 /*
57 * Clear ITEST_COMMAND and DTEST_COMMAND registers,
58 * Leaving these as non-zero can confuse the emulator
59 */
60 [p5 + (DTEST_COMMAND - COREMMR_BASE)] = r6;
61 [p5 + (ITEST_COMMAND - COREMMR_BASE)] = r6;
80 CSYNC; 62 CSYNC;
81 [p0] = R0; 63
64 trace_buffer_init(p0,r0);
65
66 /* Turn off the icache */
67 r1 = [p5 + (IMEM_CONTROL - COREMMR_BASE)];
68 BITCLR (r1, ENICPLB_P);
69 [p5 + (IMEM_CONTROL - COREMMR_BASE)] = r1;
82 SSYNC; 70 SSYNC;
83 71
84 /* Turn off the dcache */ 72 /* Turn off the dcache */
85 p0.l = LO(DMEM_CONTROL); 73 r1 = [p5 + (DMEM_CONTROL - COREMMR_BASE)];
86 p0.h = HI(DMEM_CONTROL); 74 BITCLR (r1, ENDCPLB_P);
87 R1 = [p0]; 75 [p5 + (DMEM_CONTROL - COREMMR_BASE)] = r1;
88 R0 = ~ENDCPLB;
89 R0 = R0 & R1;
90
91 /* Disabling of CPLBs should be proceeded by a CSYNC */
92 CSYNC;
93 [p0] = R0;
94 SSYNC; 76 SSYNC;
95 77
96 /* in case of double faults, save a few things */ 78 /* in case of double faults, save a few things */
97 p0.l = _init_retx_coreb; 79 p1.l = _initial_pda_coreb;
98 p0.h = _init_retx_coreb; 80 p1.h = _initial_pda_coreb;
99 R0 = RETX; 81 r4 = RETX;
100 [P0] = R0;
101
102#ifdef CONFIG_DEBUG_DOUBLEFAULT 82#ifdef CONFIG_DEBUG_DOUBLEFAULT
103 /* Only save these if we are storing them, 83 /* Only save these if we are storing them,
104 * This happens here, since L1 gets clobbered 84 * This happens here, since L1 gets clobbered
105 * below 85 * below
106 */ 86 */
107 GET_PDA(p0, r0); 87 GET_PDA(p0, r0);
108 r7 = [p0 + PDA_DF_RETX]; 88 r0 = [p0 + PDA_DF_RETX];
109 p1.l = _init_saved_retx_coreb; 89 r1 = [p0 + PDA_DF_DCPLB];
110 p1.h = _init_saved_retx_coreb; 90 r2 = [p0 + PDA_DF_ICPLB];
111 [p1] = r7; 91 r3 = [p0 + PDA_DF_SEQSTAT];
112 92 [p1 + PDA_INIT_DF_RETX] = r0;
113 r7 = [p0 + PDA_DF_DCPLB]; 93 [p1 + PDA_INIT_DF_DCPLB] = r1;
114 p1.l = _init_saved_dcplb_fault_addr_coreb; 94 [p1 + PDA_INIT_DF_ICPLB] = r2;
115 p1.h = _init_saved_dcplb_fault_addr_coreb; 95 [p1 + PDA_INIT_DF_SEQSTAT] = r3;
116 [p1] = r7;
117
118 r7 = [p0 + PDA_DF_ICPLB];
119 p1.l = _init_saved_icplb_fault_addr_coreb;
120 p1.h = _init_saved_icplb_fault_addr_coreb;
121 [p1] = r7;
122
123 r7 = [p0 + PDA_DF_SEQSTAT];
124 p1.l = _init_saved_seqstat_coreb;
125 p1.h = _init_saved_seqstat_coreb;
126 [p1] = r7;
127#endif 96#endif
97 [p1 + PDA_INIT_RETX] = r4;
128 98
129 /* Initialize stack pointer */ 99 /* Initialize stack pointer */
130 sp.l = lo(INITIAL_STACK); 100 sp.l = lo(INITIAL_STACK);
@@ -138,19 +108,13 @@ ENTRY(_coreb_trampoline_start)
138 108
139 /* EVT15 = _real_start */ 109 /* EVT15 = _real_start */
140 110
141 p0.l = lo(EVT15);
142 p0.h = hi(EVT15);
143 p1.l = _coreb_start; 111 p1.l = _coreb_start;
144 p1.h = _coreb_start; 112 p1.h = _coreb_start;
145 [p0] = p1; 113 [p5 + (EVT15 - COREMMR_BASE)] = p1;
146 csync; 114 csync;
147 115
148 p0.l = lo(IMASK); 116 r0 = EVT_IVG15 (z);
149 p0.h = hi(IMASK); 117 sti r0;
150 p1.l = IMASK_IVG15;
151 p1.h = 0x0;
152 [p0] = p1;
153 csync;
154 118
155 raise 15; 119 raise 15;
156 p0.l = .LWAIT_HERE; 120 p0.l = .LWAIT_HERE;
diff --git a/arch/blackfin/mach-common/dpmc_modes.S b/arch/blackfin/mach-common/dpmc_modes.S
index 9cfdd49a3127..1c534d298de4 100644
--- a/arch/blackfin/mach-common/dpmc_modes.S
+++ b/arch/blackfin/mach-common/dpmc_modes.S
@@ -12,8 +12,8 @@
12.section .l1.text 12.section .l1.text
13 13
14ENTRY(_sleep_mode) 14ENTRY(_sleep_mode)
15 [--SP] = ( R7:0, P5:0 ); 15 [--SP] = (R7:4, P5:3);
16 [--SP] = RETS; 16 [--SP] = RETS;
17 17
18 call _set_sic_iwr; 18 call _set_sic_iwr;
19 19
@@ -46,15 +46,25 @@ ENTRY(_sleep_mode)
46 call _test_pll_locked; 46 call _test_pll_locked;
47 47
48 RETS = [SP++]; 48 RETS = [SP++];
49 ( R7:0, P5:0 ) = [SP++]; 49 (R7:4, P5:3) = [SP++];
50 RTS; 50 RTS;
51ENDPROC(_sleep_mode) 51ENDPROC(_sleep_mode)
52 52
53/*
54 * This func never returns as it puts the part into hibernate, and
55 * is only called from do_hibernate, so we don't bother saving or
56 * restoring any of the normal C runtime state. When we wake up,
57 * the entry point will be in do_hibernate and not here.
58 *
59 * We accept just one argument -- the value to write to VR_CTL.
60 */
53ENTRY(_hibernate_mode) 61ENTRY(_hibernate_mode)
54 [--SP] = ( R7:0, P5:0 ); 62 /* Save/setup the regs we need early for minor pipeline optimization */
55 [--SP] = RETS; 63 R4 = R0;
64 P3.H = hi(VR_CTL);
65 P3.L = lo(VR_CTL);
56 66
57 R3 = R0; 67 /* Disable all wakeup sources */
58 R0 = IWR_DISABLE_ALL; 68 R0 = IWR_DISABLE_ALL;
59 R1 = IWR_DISABLE_ALL; 69 R1 = IWR_DISABLE_ALL;
60 R2 = IWR_DISABLE_ALL; 70 R2 = IWR_DISABLE_ALL;
@@ -62,10 +72,8 @@ ENTRY(_hibernate_mode)
62 call _set_dram_srfs; 72 call _set_dram_srfs;
63 SSYNC; 73 SSYNC;
64 74
65 P0.H = hi(VR_CTL); 75 /* Finally, we climb into our cave to hibernate */
66 P0.L = lo(VR_CTL); 76 W[P3] = R4.L;
67
68 W[P0] = R3.L;
69 CLI R2; 77 CLI R2;
70 IDLE; 78 IDLE;
71.Lforever: 79.Lforever:
@@ -73,8 +81,8 @@ ENTRY(_hibernate_mode)
73ENDPROC(_hibernate_mode) 81ENDPROC(_hibernate_mode)
74 82
75ENTRY(_sleep_deeper) 83ENTRY(_sleep_deeper)
76 [--SP] = ( R7:0, P5:0 ); 84 [--SP] = (R7:4, P5:3);
77 [--SP] = RETS; 85 [--SP] = RETS;
78 86
79 CLI R4; 87 CLI R4;
80 88
@@ -167,7 +175,7 @@ ENTRY(_sleep_deeper)
167 STI R4; 175 STI R4;
168 176
169 RETS = [SP++]; 177 RETS = [SP++];
170 ( R7:0, P5:0 ) = [SP++]; 178 (R7:4, P5:3) = [SP++];
171 RTS; 179 RTS;
172ENDPROC(_sleep_deeper) 180ENDPROC(_sleep_deeper)
173 181
@@ -188,21 +196,20 @@ ENTRY(_set_dram_srfs)
188#else /* SDRAM */ 196#else /* SDRAM */
189 P0.L = lo(EBIU_SDGCTL); 197 P0.L = lo(EBIU_SDGCTL);
190 P0.H = hi(EBIU_SDGCTL); 198 P0.H = hi(EBIU_SDGCTL);
199 P1.L = lo(EBIU_SDSTAT);
200 P1.H = hi(EBIU_SDSTAT);
201
191 R2 = [P0]; 202 R2 = [P0];
192 BITSET(R2, 24); /* SRFS enter self-refresh mode */ 203 BITSET(R2, 24); /* SRFS enter self-refresh mode */
193 [P0] = R2; 204 [P0] = R2;
194 SSYNC; 205 SSYNC;
195 206
196 P0.L = lo(EBIU_SDSTAT);
197 P0.H = hi(EBIU_SDSTAT);
1981: 2071:
199 R2 = w[P0]; 208 R2 = w[P1];
200 SSYNC; 209 SSYNC;
201 cc = BITTST(R2, 1); /* SDSRA poll self-refresh status */ 210 cc = BITTST(R2, 1); /* SDSRA poll self-refresh status */
202 if !cc jump 1b; 211 if !cc jump 1b;
203 212
204 P0.L = lo(EBIU_SDGCTL);
205 P0.H = hi(EBIU_SDGCTL);
206 R2 = [P0]; 213 R2 = [P0];
207 BITCLR(R2, 0); /* SCTLE disable CLKOUT */ 214 BITCLR(R2, 0); /* SCTLE disable CLKOUT */
208 [P0] = R2; 215 [P0] = R2;
@@ -212,6 +219,7 @@ ENDPROC(_set_dram_srfs)
212 219
213ENTRY(_unset_dram_srfs) 220ENTRY(_unset_dram_srfs)
214 /* set the dram out of self refresh mode */ 221 /* set the dram out of self refresh mode */
222
215#if defined(EBIU_RSTCTL) /* DDR */ 223#if defined(EBIU_RSTCTL) /* DDR */
216 P0.H = hi(EBIU_RSTCTL); 224 P0.H = hi(EBIU_RSTCTL);
217 P0.L = lo(EBIU_RSTCTL); 225 P0.L = lo(EBIU_RSTCTL);
@@ -219,42 +227,39 @@ ENTRY(_unset_dram_srfs)
219 BITCLR(R2, 3); /* clear SRREQ bit */ 227 BITCLR(R2, 3); /* clear SRREQ bit */
220 [P0] = R2; 228 [P0] = R2;
221#elif defined(EBIU_SDGCTL) /* SDRAM */ 229#elif defined(EBIU_SDGCTL) /* SDRAM */
222 230 /* release CLKOUT from self-refresh */
223 P0.L = lo(EBIU_SDGCTL); /* release CLKOUT from self-refresh */ 231 P0.L = lo(EBIU_SDGCTL);
224 P0.H = hi(EBIU_SDGCTL); 232 P0.H = hi(EBIU_SDGCTL);
233
225 R2 = [P0]; 234 R2 = [P0];
226 BITSET(R2, 0); /* SCTLE enable CLKOUT */ 235 BITSET(R2, 0); /* SCTLE enable CLKOUT */
227 [P0] = R2 236 [P0] = R2
228 SSYNC; 237 SSYNC;
229 238
230 P0.L = lo(EBIU_SDGCTL); /* release SDRAM from self-refresh */ 239 /* release SDRAM from self-refresh */
231 P0.H = hi(EBIU_SDGCTL);
232 R2 = [P0]; 240 R2 = [P0];
233 BITCLR(R2, 24); /* clear SRFS bit */ 241 BITCLR(R2, 24); /* clear SRFS bit */
234 [P0] = R2 242 [P0] = R2
235#endif 243#endif
244
236 SSYNC; 245 SSYNC;
237 RTS; 246 RTS;
238ENDPROC(_unset_dram_srfs) 247ENDPROC(_unset_dram_srfs)
239 248
240ENTRY(_set_sic_iwr) 249ENTRY(_set_sic_iwr)
241#if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561) || \ 250#ifdef SIC_IWR0
242 defined(CONFIG_BF538) || defined(CONFIG_BF539) || defined(CONFIG_BF51x) 251 P0.H = hi(SYSMMR_BASE);
243 P0.H = hi(SIC_IWR0); 252 P0.L = lo(SYSMMR_BASE);
244 P0.L = lo(SIC_IWR0); 253 [P0 + (SIC_IWR0 - SYSMMR_BASE)] = R0;
245 P1.H = hi(SIC_IWR1); 254 [P0 + (SIC_IWR1 - SYSMMR_BASE)] = R1;
246 P1.L = lo(SIC_IWR1); 255# ifdef SIC_IWR2
247 [P1] = R1; 256 [P0 + (SIC_IWR2 - SYSMMR_BASE)] = R2;
248#if defined(CONFIG_BF54x) 257# endif
249 P1.H = hi(SIC_IWR2);
250 P1.L = lo(SIC_IWR2);
251 [P1] = R2;
252#endif
253#else 258#else
254 P0.H = hi(SIC_IWR); 259 P0.H = hi(SIC_IWR);
255 P0.L = lo(SIC_IWR); 260 P0.L = lo(SIC_IWR);
256#endif
257 [P0] = R0; 261 [P0] = R0;
262#endif
258 263
259 SSYNC; 264 SSYNC;
260 RTS; 265 RTS;
@@ -272,206 +277,55 @@ ENDPROC(_test_pll_locked)
272 277
273.section .text 278.section .text
274 279
275ENTRY(_do_hibernate) 280#define PM_REG0 R7
276 [--SP] = ( R7:0, P5:0 ); 281#define PM_REG1 R6
277 [--SP] = RETS; 282#define PM_REG2 R5
278 /* Save System MMRs */ 283#define PM_REG3 R4
279 R2 = R0; 284#define PM_REG4 R3
280 P0.H = hi(PLL_CTL); 285#define PM_REG5 R2
281 P0.L = lo(PLL_CTL); 286#define PM_REG6 R1
282 287#define PM_REG7 R0
283#ifdef SIC_IMASK0 288#define PM_REG8 P5
284 PM_SYS_PUSH(SIC_IMASK0) 289#define PM_REG9 P4
285#endif 290#define PM_REG10 P3
286#ifdef SIC_IMASK1 291#define PM_REG11 P2
287 PM_SYS_PUSH(SIC_IMASK1) 292#define PM_REG12 P1
288#endif 293#define PM_REG13 P0
289#ifdef SIC_IMASK2 294
290 PM_SYS_PUSH(SIC_IMASK2) 295#define PM_REGSET0 R7:7
291#endif 296#define PM_REGSET1 R7:6
292#ifdef SIC_IMASK 297#define PM_REGSET2 R7:5
293 PM_SYS_PUSH(SIC_IMASK) 298#define PM_REGSET3 R7:4
294#endif 299#define PM_REGSET4 R7:3
295#ifdef SIC_IAR0 300#define PM_REGSET5 R7:2
296 PM_SYS_PUSH(SIC_IAR0) 301#define PM_REGSET6 R7:1
297 PM_SYS_PUSH(SIC_IAR1) 302#define PM_REGSET7 R7:0
298 PM_SYS_PUSH(SIC_IAR2) 303#define PM_REGSET8 R7:0, P5:5
299#endif 304#define PM_REGSET9 R7:0, P5:4
300#ifdef SIC_IAR3 305#define PM_REGSET10 R7:0, P5:3
301 PM_SYS_PUSH(SIC_IAR3) 306#define PM_REGSET11 R7:0, P5:2
302#endif 307#define PM_REGSET12 R7:0, P5:1
303#ifdef SIC_IAR4 308#define PM_REGSET13 R7:0, P5:0
304 PM_SYS_PUSH(SIC_IAR4) 309
305 PM_SYS_PUSH(SIC_IAR5) 310#define _PM_PUSH(n, x, w, base) PM_REG##n = w[FP + ((x) - (base))];
306 PM_SYS_PUSH(SIC_IAR6) 311#define _PM_POP(n, x, w, base) w[FP + ((x) - (base))] = PM_REG##n;
307#endif 312#define PM_PUSH_SYNC(n) [--sp] = (PM_REGSET##n);
308#ifdef SIC_IAR7 313#define PM_POP_SYNC(n) (PM_REGSET##n) = [sp++];
309 PM_SYS_PUSH(SIC_IAR7) 314#define PM_PUSH(n, x) PM_REG##n = [FP++];
310#endif 315#define PM_POP(n, x) [FP--] = PM_REG##n;
311#ifdef SIC_IAR8 316#define PM_CORE_PUSH(n, x) _PM_PUSH(n, x, , COREMMR_BASE)
312 PM_SYS_PUSH(SIC_IAR8) 317#define PM_CORE_POP(n, x) _PM_POP(n, x, , COREMMR_BASE)
313 PM_SYS_PUSH(SIC_IAR9) 318#define PM_SYS_PUSH(n, x) _PM_PUSH(n, x, , SYSMMR_BASE)
314 PM_SYS_PUSH(SIC_IAR10) 319#define PM_SYS_POP(n, x) _PM_POP(n, x, , SYSMMR_BASE)
315 PM_SYS_PUSH(SIC_IAR11) 320#define PM_SYS_PUSH16(n, x) _PM_PUSH(n, x, w, SYSMMR_BASE)
316#endif 321#define PM_SYS_POP16(n, x) _PM_POP(n, x, w, SYSMMR_BASE)
317 322
318#ifdef SIC_IWR 323ENTRY(_do_hibernate)
319 PM_SYS_PUSH(SIC_IWR) 324 /*
320#endif 325 * Save the core regs early so we can blow them away when
321#ifdef SIC_IWR0 326 * saving/restoring MMR states
322 PM_SYS_PUSH(SIC_IWR0) 327 */
323#endif 328 [--sp] = (R7:0, P5:0);
324#ifdef SIC_IWR1
325 PM_SYS_PUSH(SIC_IWR1)
326#endif
327#ifdef SIC_IWR2
328 PM_SYS_PUSH(SIC_IWR2)
329#endif
330
331#ifdef PINT0_ASSIGN
332 PM_SYS_PUSH(PINT0_MASK_SET)
333 PM_SYS_PUSH(PINT1_MASK_SET)
334 PM_SYS_PUSH(PINT2_MASK_SET)
335 PM_SYS_PUSH(PINT3_MASK_SET)
336 PM_SYS_PUSH(PINT0_ASSIGN)
337 PM_SYS_PUSH(PINT1_ASSIGN)
338 PM_SYS_PUSH(PINT2_ASSIGN)
339 PM_SYS_PUSH(PINT3_ASSIGN)
340 PM_SYS_PUSH(PINT0_INVERT_SET)
341 PM_SYS_PUSH(PINT1_INVERT_SET)
342 PM_SYS_PUSH(PINT2_INVERT_SET)
343 PM_SYS_PUSH(PINT3_INVERT_SET)
344 PM_SYS_PUSH(PINT0_EDGE_SET)
345 PM_SYS_PUSH(PINT1_EDGE_SET)
346 PM_SYS_PUSH(PINT2_EDGE_SET)
347 PM_SYS_PUSH(PINT3_EDGE_SET)
348#endif
349
350 PM_SYS_PUSH(EBIU_AMBCTL0)
351 PM_SYS_PUSH(EBIU_AMBCTL1)
352 PM_SYS_PUSH16(EBIU_AMGCTL)
353
354#ifdef EBIU_FCTL
355 PM_SYS_PUSH(EBIU_MBSCTL)
356 PM_SYS_PUSH(EBIU_MODE)
357 PM_SYS_PUSH(EBIU_FCTL)
358#endif
359
360#ifdef PORTCIO_FER
361 PM_SYS_PUSH16(PORTCIO_DIR)
362 PM_SYS_PUSH16(PORTCIO_INEN)
363 PM_SYS_PUSH16(PORTCIO)
364 PM_SYS_PUSH16(PORTCIO_FER)
365 PM_SYS_PUSH16(PORTDIO_DIR)
366 PM_SYS_PUSH16(PORTDIO_INEN)
367 PM_SYS_PUSH16(PORTDIO)
368 PM_SYS_PUSH16(PORTDIO_FER)
369 PM_SYS_PUSH16(PORTEIO_DIR)
370 PM_SYS_PUSH16(PORTEIO_INEN)
371 PM_SYS_PUSH16(PORTEIO)
372 PM_SYS_PUSH16(PORTEIO_FER)
373#endif
374
375 PM_SYS_PUSH16(SYSCR)
376
377 /* Save Core MMRs */
378 P0.H = hi(SRAM_BASE_ADDRESS);
379 P0.L = lo(SRAM_BASE_ADDRESS);
380
381 PM_PUSH(DMEM_CONTROL)
382 PM_PUSH(DCPLB_ADDR0)
383 PM_PUSH(DCPLB_ADDR1)
384 PM_PUSH(DCPLB_ADDR2)
385 PM_PUSH(DCPLB_ADDR3)
386 PM_PUSH(DCPLB_ADDR4)
387 PM_PUSH(DCPLB_ADDR5)
388 PM_PUSH(DCPLB_ADDR6)
389 PM_PUSH(DCPLB_ADDR7)
390 PM_PUSH(DCPLB_ADDR8)
391 PM_PUSH(DCPLB_ADDR9)
392 PM_PUSH(DCPLB_ADDR10)
393 PM_PUSH(DCPLB_ADDR11)
394 PM_PUSH(DCPLB_ADDR12)
395 PM_PUSH(DCPLB_ADDR13)
396 PM_PUSH(DCPLB_ADDR14)
397 PM_PUSH(DCPLB_ADDR15)
398 PM_PUSH(DCPLB_DATA0)
399 PM_PUSH(DCPLB_DATA1)
400 PM_PUSH(DCPLB_DATA2)
401 PM_PUSH(DCPLB_DATA3)
402 PM_PUSH(DCPLB_DATA4)
403 PM_PUSH(DCPLB_DATA5)
404 PM_PUSH(DCPLB_DATA6)
405 PM_PUSH(DCPLB_DATA7)
406 PM_PUSH(DCPLB_DATA8)
407 PM_PUSH(DCPLB_DATA9)
408 PM_PUSH(DCPLB_DATA10)
409 PM_PUSH(DCPLB_DATA11)
410 PM_PUSH(DCPLB_DATA12)
411 PM_PUSH(DCPLB_DATA13)
412 PM_PUSH(DCPLB_DATA14)
413 PM_PUSH(DCPLB_DATA15)
414 PM_PUSH(IMEM_CONTROL)
415 PM_PUSH(ICPLB_ADDR0)
416 PM_PUSH(ICPLB_ADDR1)
417 PM_PUSH(ICPLB_ADDR2)
418 PM_PUSH(ICPLB_ADDR3)
419 PM_PUSH(ICPLB_ADDR4)
420 PM_PUSH(ICPLB_ADDR5)
421 PM_PUSH(ICPLB_ADDR6)
422 PM_PUSH(ICPLB_ADDR7)
423 PM_PUSH(ICPLB_ADDR8)
424 PM_PUSH(ICPLB_ADDR9)
425 PM_PUSH(ICPLB_ADDR10)
426 PM_PUSH(ICPLB_ADDR11)
427 PM_PUSH(ICPLB_ADDR12)
428 PM_PUSH(ICPLB_ADDR13)
429 PM_PUSH(ICPLB_ADDR14)
430 PM_PUSH(ICPLB_ADDR15)
431 PM_PUSH(ICPLB_DATA0)
432 PM_PUSH(ICPLB_DATA1)
433 PM_PUSH(ICPLB_DATA2)
434 PM_PUSH(ICPLB_DATA3)
435 PM_PUSH(ICPLB_DATA4)
436 PM_PUSH(ICPLB_DATA5)
437 PM_PUSH(ICPLB_DATA6)
438 PM_PUSH(ICPLB_DATA7)
439 PM_PUSH(ICPLB_DATA8)
440 PM_PUSH(ICPLB_DATA9)
441 PM_PUSH(ICPLB_DATA10)
442 PM_PUSH(ICPLB_DATA11)
443 PM_PUSH(ICPLB_DATA12)
444 PM_PUSH(ICPLB_DATA13)
445 PM_PUSH(ICPLB_DATA14)
446 PM_PUSH(ICPLB_DATA15)
447 PM_PUSH(EVT0)
448 PM_PUSH(EVT1)
449 PM_PUSH(EVT2)
450 PM_PUSH(EVT3)
451 PM_PUSH(EVT4)
452 PM_PUSH(EVT5)
453 PM_PUSH(EVT6)
454 PM_PUSH(EVT7)
455 PM_PUSH(EVT8)
456 PM_PUSH(EVT9)
457 PM_PUSH(EVT10)
458 PM_PUSH(EVT11)
459 PM_PUSH(EVT12)
460 PM_PUSH(EVT13)
461 PM_PUSH(EVT14)
462 PM_PUSH(EVT15)
463 PM_PUSH(IMASK)
464 PM_PUSH(ILAT)
465 PM_PUSH(IPRIO)
466 PM_PUSH(TCNTL)
467 PM_PUSH(TPERIOD)
468 PM_PUSH(TSCALE)
469 PM_PUSH(TCOUNT)
470 PM_PUSH(TBUFCTL)
471
472 /* Save Core Registers */
473 [--sp] = SYSCFG;
474 [--sp] = ( R7:0, P5:0 );
475 [--sp] = fp; 329 [--sp] = fp;
476 [--sp] = usp; 330 [--sp] = usp;
477 331
@@ -506,47 +360,497 @@ ENTRY(_do_hibernate)
506 [--sp] = LB0; 360 [--sp] = LB0;
507 [--sp] = LB1; 361 [--sp] = LB1;
508 362
363 /* We can't push RETI directly as that'll change IPEND[4] */
364 r7 = RETI;
365 [--sp] = RETS;
509 [--sp] = ASTAT; 366 [--sp] = ASTAT;
510 [--sp] = CYCLES; 367 [--sp] = CYCLES;
511 [--sp] = CYCLES2; 368 [--sp] = CYCLES2;
512 369 [--sp] = SYSCFG;
513 [--sp] = RETS;
514 r0 = RETI;
515 [--sp] = r0;
516 [--sp] = RETX; 370 [--sp] = RETX;
517 [--sp] = RETN;
518 [--sp] = RETE;
519 [--sp] = SEQSTAT; 371 [--sp] = SEQSTAT;
372 [--sp] = r7;
373
374 /* Save first func arg in M3 */
375 M3 = R0;
376
377 /* Save system MMRs */
378 FP.H = hi(SYSMMR_BASE);
379 FP.L = lo(SYSMMR_BASE);
380
381#ifdef SIC_IMASK0
382 PM_SYS_PUSH(0, SIC_IMASK0)
383 PM_SYS_PUSH(1, SIC_IMASK1)
384# ifdef SIC_IMASK2
385 PM_SYS_PUSH(2, SIC_IMASK2)
386# endif
387#else
388 PM_SYS_PUSH(0, SIC_IMASK)
389#endif
390#ifdef SIC_IAR0
391 PM_SYS_PUSH(3, SIC_IAR0)
392 PM_SYS_PUSH(4, SIC_IAR1)
393 PM_SYS_PUSH(5, SIC_IAR2)
394#endif
395#ifdef SIC_IAR3
396 PM_SYS_PUSH(6, SIC_IAR3)
397#endif
398#ifdef SIC_IAR4
399 PM_SYS_PUSH(7, SIC_IAR4)
400 PM_SYS_PUSH(8, SIC_IAR5)
401 PM_SYS_PUSH(9, SIC_IAR6)
402#endif
403#ifdef SIC_IAR7
404 PM_SYS_PUSH(10, SIC_IAR7)
405#endif
406#ifdef SIC_IAR8
407 PM_SYS_PUSH(11, SIC_IAR8)
408 PM_SYS_PUSH(12, SIC_IAR9)
409 PM_SYS_PUSH(13, SIC_IAR10)
410#endif
411 PM_PUSH_SYNC(13)
412#ifdef SIC_IAR11
413 PM_SYS_PUSH(0, SIC_IAR11)
414#endif
415
416#ifdef SIC_IWR
417 PM_SYS_PUSH(1, SIC_IWR)
418#endif
419#ifdef SIC_IWR0
420 PM_SYS_PUSH(1, SIC_IWR0)
421#endif
422#ifdef SIC_IWR1
423 PM_SYS_PUSH(2, SIC_IWR1)
424#endif
425#ifdef SIC_IWR2
426 PM_SYS_PUSH(3, SIC_IWR2)
427#endif
428
429#ifdef PINT0_ASSIGN
430 PM_SYS_PUSH(4, PINT0_MASK_SET)
431 PM_SYS_PUSH(5, PINT1_MASK_SET)
432 PM_SYS_PUSH(6, PINT2_MASK_SET)
433 PM_SYS_PUSH(7, PINT3_MASK_SET)
434 PM_SYS_PUSH(8, PINT0_ASSIGN)
435 PM_SYS_PUSH(9, PINT1_ASSIGN)
436 PM_SYS_PUSH(10, PINT2_ASSIGN)
437 PM_SYS_PUSH(11, PINT3_ASSIGN)
438 PM_SYS_PUSH(12, PINT0_INVERT_SET)
439 PM_SYS_PUSH(13, PINT1_INVERT_SET)
440 PM_PUSH_SYNC(13)
441 PM_SYS_PUSH(0, PINT2_INVERT_SET)
442 PM_SYS_PUSH(1, PINT3_INVERT_SET)
443 PM_SYS_PUSH(2, PINT0_EDGE_SET)
444 PM_SYS_PUSH(3, PINT1_EDGE_SET)
445 PM_SYS_PUSH(4, PINT2_EDGE_SET)
446 PM_SYS_PUSH(5, PINT3_EDGE_SET)
447#endif
448
449 PM_SYS_PUSH16(6, SYSCR)
450
451 PM_SYS_PUSH16(7, EBIU_AMGCTL)
452 PM_SYS_PUSH(8, EBIU_AMBCTL0)
453 PM_SYS_PUSH(9, EBIU_AMBCTL1)
454#ifdef EBIU_FCTL
455 PM_SYS_PUSH(10, EBIU_MBSCTL)
456 PM_SYS_PUSH(11, EBIU_MODE)
457 PM_SYS_PUSH(12, EBIU_FCTL)
458 PM_PUSH_SYNC(12)
459#else
460 PM_PUSH_SYNC(9)
461#endif
462
463 /* Save Core MMRs */
464 I0.H = hi(COREMMR_BASE);
465 I0.L = lo(COREMMR_BASE);
466 I1 = I0;
467 I2 = I0;
468 I3 = I0;
469 B0 = I0;
470 B1 = I0;
471 B2 = I0;
472 B3 = I0;
473 I1.L = lo(DCPLB_ADDR0);
474 I2.L = lo(DCPLB_DATA0);
475 I3.L = lo(ICPLB_ADDR0);
476 B0.L = lo(ICPLB_DATA0);
477 B1.L = lo(EVT2);
478 B2.L = lo(IMASK);
479 B3.L = lo(TCNTL);
480
481 /* DCPLB Addr */
482 FP = I1;
483 PM_PUSH(0, DCPLB_ADDR0)
484 PM_PUSH(1, DCPLB_ADDR1)
485 PM_PUSH(2, DCPLB_ADDR2)
486 PM_PUSH(3, DCPLB_ADDR3)
487 PM_PUSH(4, DCPLB_ADDR4)
488 PM_PUSH(5, DCPLB_ADDR5)
489 PM_PUSH(6, DCPLB_ADDR6)
490 PM_PUSH(7, DCPLB_ADDR7)
491 PM_PUSH(8, DCPLB_ADDR8)
492 PM_PUSH(9, DCPLB_ADDR9)
493 PM_PUSH(10, DCPLB_ADDR10)
494 PM_PUSH(11, DCPLB_ADDR11)
495 PM_PUSH(12, DCPLB_ADDR12)
496 PM_PUSH(13, DCPLB_ADDR13)
497 PM_PUSH_SYNC(13)
498 PM_PUSH(0, DCPLB_ADDR14)
499 PM_PUSH(1, DCPLB_ADDR15)
500
501 /* DCPLB Data */
502 FP = I2;
503 PM_PUSH(2, DCPLB_DATA0)
504 PM_PUSH(3, DCPLB_DATA1)
505 PM_PUSH(4, DCPLB_DATA2)
506 PM_PUSH(5, DCPLB_DATA3)
507 PM_PUSH(6, DCPLB_DATA4)
508 PM_PUSH(7, DCPLB_DATA5)
509 PM_PUSH(8, DCPLB_DATA6)
510 PM_PUSH(9, DCPLB_DATA7)
511 PM_PUSH(10, DCPLB_DATA8)
512 PM_PUSH(11, DCPLB_DATA9)
513 PM_PUSH(12, DCPLB_DATA10)
514 PM_PUSH(13, DCPLB_DATA11)
515 PM_PUSH_SYNC(13)
516 PM_PUSH(0, DCPLB_DATA12)
517 PM_PUSH(1, DCPLB_DATA13)
518 PM_PUSH(2, DCPLB_DATA14)
519 PM_PUSH(3, DCPLB_DATA15)
520
521 /* ICPLB Addr */
522 FP = I3;
523 PM_PUSH(4, ICPLB_ADDR0)
524 PM_PUSH(5, ICPLB_ADDR1)
525 PM_PUSH(6, ICPLB_ADDR2)
526 PM_PUSH(7, ICPLB_ADDR3)
527 PM_PUSH(8, ICPLB_ADDR4)
528 PM_PUSH(9, ICPLB_ADDR5)
529 PM_PUSH(10, ICPLB_ADDR6)
530 PM_PUSH(11, ICPLB_ADDR7)
531 PM_PUSH(12, ICPLB_ADDR8)
532 PM_PUSH(13, ICPLB_ADDR9)
533 PM_PUSH_SYNC(13)
534 PM_PUSH(0, ICPLB_ADDR10)
535 PM_PUSH(1, ICPLB_ADDR11)
536 PM_PUSH(2, ICPLB_ADDR12)
537 PM_PUSH(3, ICPLB_ADDR13)
538 PM_PUSH(4, ICPLB_ADDR14)
539 PM_PUSH(5, ICPLB_ADDR15)
540
541 /* ICPLB Data */
542 FP = B0;
543 PM_PUSH(6, ICPLB_DATA0)
544 PM_PUSH(7, ICPLB_DATA1)
545 PM_PUSH(8, ICPLB_DATA2)
546 PM_PUSH(9, ICPLB_DATA3)
547 PM_PUSH(10, ICPLB_DATA4)
548 PM_PUSH(11, ICPLB_DATA5)
549 PM_PUSH(12, ICPLB_DATA6)
550 PM_PUSH(13, ICPLB_DATA7)
551 PM_PUSH_SYNC(13)
552 PM_PUSH(0, ICPLB_DATA8)
553 PM_PUSH(1, ICPLB_DATA9)
554 PM_PUSH(2, ICPLB_DATA10)
555 PM_PUSH(3, ICPLB_DATA11)
556 PM_PUSH(4, ICPLB_DATA12)
557 PM_PUSH(5, ICPLB_DATA13)
558 PM_PUSH(6, ICPLB_DATA14)
559 PM_PUSH(7, ICPLB_DATA15)
560
561 /* Event Vectors */
562 FP = B1;
563 PM_PUSH(8, EVT2)
564 PM_PUSH(9, EVT3)
565 FP += 4; /* EVT4 */
566 PM_PUSH(10, EVT5)
567 PM_PUSH(11, EVT6)
568 PM_PUSH(12, EVT7)
569 PM_PUSH(13, EVT8)
570 PM_PUSH_SYNC(13)
571 PM_PUSH(0, EVT9)
572 PM_PUSH(1, EVT10)
573 PM_PUSH(2, EVT11)
574 PM_PUSH(3, EVT12)
575 PM_PUSH(4, EVT13)
576 PM_PUSH(5, EVT14)
577 PM_PUSH(6, EVT15)
578
579 /* CEC */
580 FP = B2;
581 PM_PUSH(7, IMASK)
582 FP += 4; /* IPEND */
583 PM_PUSH(8, ILAT)
584 PM_PUSH(9, IPRIO)
585
586 /* Core Timer */
587 FP = B3;
588 PM_PUSH(10, TCNTL)
589 PM_PUSH(11, TPERIOD)
590 PM_PUSH(12, TSCALE)
591 PM_PUSH(13, TCOUNT)
592 PM_PUSH_SYNC(13)
593
594 /* Misc non-contiguous registers */
595 FP = I0;
596 PM_CORE_PUSH(0, DMEM_CONTROL);
597 PM_CORE_PUSH(1, IMEM_CONTROL);
598 PM_CORE_PUSH(2, TBUFCTL);
599 PM_PUSH_SYNC(2)
600
601 /* Setup args to hibernate mode early for pipeline optimization */
602 R0 = M3;
603 P1.H = _hibernate_mode;
604 P1.L = _hibernate_mode;
520 605
521 /* Save Magic, return address and Stack Pointer */ 606 /* Save Magic, return address and Stack Pointer */
522 P0.H = 0; 607 P0 = 0;
523 P0.L = 0; 608 R1.H = 0xDEAD; /* Hibernate Magic */
524 R0.H = 0xDEAD; /* Hibernate Magic */ 609 R1.L = 0xBEEF;
525 R0.L = 0xBEEF; 610 R2.H = .Lpm_resume_here;
526 [P0++] = R0; /* Store Hibernate Magic */ 611 R2.L = .Lpm_resume_here;
527 R0.H = .Lpm_resume_here; 612 [P0++] = R1; /* Store Hibernate Magic */
528 R0.L = .Lpm_resume_here; 613 [P0++] = R2; /* Save Return Address */
529 [P0++] = R0; /* Save Return Address */
530 [P0++] = SP; /* Save Stack Pointer */ 614 [P0++] = SP; /* Save Stack Pointer */
531 P0.H = _hibernate_mode; 615
532 P0.L = _hibernate_mode; 616 /* Must use an indirect call as we need to jump to L1 */
533 R0 = R2; 617 call (P1); /* Goodbye */
534 call (P0); /* Goodbye */
535 618
536.Lpm_resume_here: 619.Lpm_resume_here:
537 620
621 /* Restore Core MMRs */
622 I0.H = hi(COREMMR_BASE);
623 I0.L = lo(COREMMR_BASE);
624 I1 = I0;
625 I2 = I0;
626 I3 = I0;
627 B0 = I0;
628 B1 = I0;
629 B2 = I0;
630 B3 = I0;
631 I1.L = lo(DCPLB_ADDR15);
632 I2.L = lo(DCPLB_DATA15);
633 I3.L = lo(ICPLB_ADDR15);
634 B0.L = lo(ICPLB_DATA15);
635 B1.L = lo(EVT15);
636 B2.L = lo(IPRIO);
637 B3.L = lo(TCOUNT);
638
639 /* Misc non-contiguous registers */
640 FP = I0;
641 PM_POP_SYNC(2)
642 PM_CORE_POP(2, TBUFCTL)
643 PM_CORE_POP(1, IMEM_CONTROL)
644 PM_CORE_POP(0, DMEM_CONTROL)
645
646 /* Core Timer */
647 PM_POP_SYNC(13)
648 FP = B3;
649 PM_POP(13, TCOUNT)
650 PM_POP(12, TSCALE)
651 PM_POP(11, TPERIOD)
652 PM_POP(10, TCNTL)
653
654 /* CEC */
655 FP = B2;
656 PM_POP(9, IPRIO)
657 PM_POP(8, ILAT)
658 FP += -4; /* IPEND */
659 PM_POP(7, IMASK)
660
661 /* Event Vectors */
662 FP = B1;
663 PM_POP(6, EVT15)
664 PM_POP(5, EVT14)
665 PM_POP(4, EVT13)
666 PM_POP(3, EVT12)
667 PM_POP(2, EVT11)
668 PM_POP(1, EVT10)
669 PM_POP(0, EVT9)
670 PM_POP_SYNC(13)
671 PM_POP(13, EVT8)
672 PM_POP(12, EVT7)
673 PM_POP(11, EVT6)
674 PM_POP(10, EVT5)
675 FP += -4; /* EVT4 */
676 PM_POP(9, EVT3)
677 PM_POP(8, EVT2)
678
679 /* ICPLB Data */
680 FP = B0;
681 PM_POP(7, ICPLB_DATA15)
682 PM_POP(6, ICPLB_DATA14)
683 PM_POP(5, ICPLB_DATA13)
684 PM_POP(4, ICPLB_DATA12)
685 PM_POP(3, ICPLB_DATA11)
686 PM_POP(2, ICPLB_DATA10)
687 PM_POP(1, ICPLB_DATA9)
688 PM_POP(0, ICPLB_DATA8)
689 PM_POP_SYNC(13)
690 PM_POP(13, ICPLB_DATA7)
691 PM_POP(12, ICPLB_DATA6)
692 PM_POP(11, ICPLB_DATA5)
693 PM_POP(10, ICPLB_DATA4)
694 PM_POP(9, ICPLB_DATA3)
695 PM_POP(8, ICPLB_DATA2)
696 PM_POP(7, ICPLB_DATA1)
697 PM_POP(6, ICPLB_DATA0)
698
699 /* ICPLB Addr */
700 FP = I3;
701 PM_POP(5, ICPLB_ADDR15)
702 PM_POP(4, ICPLB_ADDR14)
703 PM_POP(3, ICPLB_ADDR13)
704 PM_POP(2, ICPLB_ADDR12)
705 PM_POP(1, ICPLB_ADDR11)
706 PM_POP(0, ICPLB_ADDR10)
707 PM_POP_SYNC(13)
708 PM_POP(13, ICPLB_ADDR9)
709 PM_POP(12, ICPLB_ADDR8)
710 PM_POP(11, ICPLB_ADDR7)
711 PM_POP(10, ICPLB_ADDR6)
712 PM_POP(9, ICPLB_ADDR5)
713 PM_POP(8, ICPLB_ADDR4)
714 PM_POP(7, ICPLB_ADDR3)
715 PM_POP(6, ICPLB_ADDR2)
716 PM_POP(5, ICPLB_ADDR1)
717 PM_POP(4, ICPLB_ADDR0)
718
719 /* DCPLB Data */
720 FP = I2;
721 PM_POP(3, DCPLB_DATA15)
722 PM_POP(2, DCPLB_DATA14)
723 PM_POP(1, DCPLB_DATA13)
724 PM_POP(0, DCPLB_DATA12)
725 PM_POP_SYNC(13)
726 PM_POP(13, DCPLB_DATA11)
727 PM_POP(12, DCPLB_DATA10)
728 PM_POP(11, DCPLB_DATA9)
729 PM_POP(10, DCPLB_DATA8)
730 PM_POP(9, DCPLB_DATA7)
731 PM_POP(8, DCPLB_DATA6)
732 PM_POP(7, DCPLB_DATA5)
733 PM_POP(6, DCPLB_DATA4)
734 PM_POP(5, DCPLB_DATA3)
735 PM_POP(4, DCPLB_DATA2)
736 PM_POP(3, DCPLB_DATA1)
737 PM_POP(2, DCPLB_DATA0)
738
739 /* DCPLB Addr */
740 FP = I1;
741 PM_POP(1, DCPLB_ADDR15)
742 PM_POP(0, DCPLB_ADDR14)
743 PM_POP_SYNC(13)
744 PM_POP(13, DCPLB_ADDR13)
745 PM_POP(12, DCPLB_ADDR12)
746 PM_POP(11, DCPLB_ADDR11)
747 PM_POP(10, DCPLB_ADDR10)
748 PM_POP(9, DCPLB_ADDR9)
749 PM_POP(8, DCPLB_ADDR8)
750 PM_POP(7, DCPLB_ADDR7)
751 PM_POP(6, DCPLB_ADDR6)
752 PM_POP(5, DCPLB_ADDR5)
753 PM_POP(4, DCPLB_ADDR4)
754 PM_POP(3, DCPLB_ADDR3)
755 PM_POP(2, DCPLB_ADDR2)
756 PM_POP(1, DCPLB_ADDR1)
757 PM_POP(0, DCPLB_ADDR0)
758
759 /* Restore System MMRs */
760 FP.H = hi(SYSMMR_BASE);
761 FP.L = lo(SYSMMR_BASE);
762
763#ifdef EBIU_FCTL
764 PM_POP_SYNC(12)
765 PM_SYS_POP(12, EBIU_FCTL)
766 PM_SYS_POP(11, EBIU_MODE)
767 PM_SYS_POP(10, EBIU_MBSCTL)
768#else
769 PM_POP_SYNC(9)
770#endif
771 PM_SYS_POP(9, EBIU_AMBCTL1)
772 PM_SYS_POP(8, EBIU_AMBCTL0)
773 PM_SYS_POP16(7, EBIU_AMGCTL)
774
775 PM_SYS_POP16(6, SYSCR)
776
777#ifdef PINT0_ASSIGN
778 PM_SYS_POP(5, PINT3_EDGE_SET)
779 PM_SYS_POP(4, PINT2_EDGE_SET)
780 PM_SYS_POP(3, PINT1_EDGE_SET)
781 PM_SYS_POP(2, PINT0_EDGE_SET)
782 PM_SYS_POP(1, PINT3_INVERT_SET)
783 PM_SYS_POP(0, PINT2_INVERT_SET)
784 PM_POP_SYNC(13)
785 PM_SYS_POP(13, PINT1_INVERT_SET)
786 PM_SYS_POP(12, PINT0_INVERT_SET)
787 PM_SYS_POP(11, PINT3_ASSIGN)
788 PM_SYS_POP(10, PINT2_ASSIGN)
789 PM_SYS_POP(9, PINT1_ASSIGN)
790 PM_SYS_POP(8, PINT0_ASSIGN)
791 PM_SYS_POP(7, PINT3_MASK_SET)
792 PM_SYS_POP(6, PINT2_MASK_SET)
793 PM_SYS_POP(5, PINT1_MASK_SET)
794 PM_SYS_POP(4, PINT0_MASK_SET)
795#endif
796
797#ifdef SIC_IWR2
798 PM_SYS_POP(3, SIC_IWR2)
799#endif
800#ifdef SIC_IWR1
801 PM_SYS_POP(2, SIC_IWR1)
802#endif
803#ifdef SIC_IWR0
804 PM_SYS_POP(1, SIC_IWR0)
805#endif
806#ifdef SIC_IWR
807 PM_SYS_POP(1, SIC_IWR)
808#endif
809
810#ifdef SIC_IAR11
811 PM_SYS_POP(0, SIC_IAR11)
812#endif
813 PM_POP_SYNC(13)
814#ifdef SIC_IAR8
815 PM_SYS_POP(13, SIC_IAR10)
816 PM_SYS_POP(12, SIC_IAR9)
817 PM_SYS_POP(11, SIC_IAR8)
818#endif
819#ifdef SIC_IAR7
820 PM_SYS_POP(10, SIC_IAR7)
821#endif
822#ifdef SIC_IAR6
823 PM_SYS_POP(9, SIC_IAR6)
824 PM_SYS_POP(8, SIC_IAR5)
825 PM_SYS_POP(7, SIC_IAR4)
826#endif
827#ifdef SIC_IAR3
828 PM_SYS_POP(6, SIC_IAR3)
829#endif
830#ifdef SIC_IAR0
831 PM_SYS_POP(5, SIC_IAR2)
832 PM_SYS_POP(4, SIC_IAR1)
833 PM_SYS_POP(3, SIC_IAR0)
834#endif
835#ifdef SIC_IMASK0
836# ifdef SIC_IMASK2
837 PM_SYS_POP(2, SIC_IMASK2)
838# endif
839 PM_SYS_POP(1, SIC_IMASK1)
840 PM_SYS_POP(0, SIC_IMASK0)
841#else
842 PM_SYS_POP(0, SIC_IMASK)
843#endif
844
538 /* Restore Core Registers */ 845 /* Restore Core Registers */
846 RETI = [sp++];
539 SEQSTAT = [sp++]; 847 SEQSTAT = [sp++];
540 RETE = [sp++];
541 RETN = [sp++];
542 RETX = [sp++]; 848 RETX = [sp++];
543 r0 = [sp++]; 849 SYSCFG = [sp++];
544 RETI = r0;
545 RETS = [sp++];
546
547 CYCLES2 = [sp++]; 850 CYCLES2 = [sp++];
548 CYCLES = [sp++]; 851 CYCLES = [sp++];
549 ASTAT = [sp++]; 852 ASTAT = [sp++];
853 RETS = [sp++];
550 854
551 LB1 = [sp++]; 855 LB1 = [sp++];
552 LB0 = [sp++]; 856 LB0 = [sp++];
@@ -581,204 +885,10 @@ ENTRY(_do_hibernate)
581 885
582 usp = [sp++]; 886 usp = [sp++];
583 fp = [sp++]; 887 fp = [sp++];
584 888 (R7:0, P5:0) = [sp++];
585 ( R7 : 0, P5 : 0) = [ SP ++ ];
586 SYSCFG = [sp++];
587
588 /* Restore Core MMRs */
589
590 PM_POP(TBUFCTL)
591 PM_POP(TCOUNT)
592 PM_POP(TSCALE)
593 PM_POP(TPERIOD)
594 PM_POP(TCNTL)
595 PM_POP(IPRIO)
596 PM_POP(ILAT)
597 PM_POP(IMASK)
598 PM_POP(EVT15)
599 PM_POP(EVT14)
600 PM_POP(EVT13)
601 PM_POP(EVT12)
602 PM_POP(EVT11)
603 PM_POP(EVT10)
604 PM_POP(EVT9)
605 PM_POP(EVT8)
606 PM_POP(EVT7)
607 PM_POP(EVT6)
608 PM_POP(EVT5)
609 PM_POP(EVT4)
610 PM_POP(EVT3)
611 PM_POP(EVT2)
612 PM_POP(EVT1)
613 PM_POP(EVT0)
614 PM_POP(ICPLB_DATA15)
615 PM_POP(ICPLB_DATA14)
616 PM_POP(ICPLB_DATA13)
617 PM_POP(ICPLB_DATA12)
618 PM_POP(ICPLB_DATA11)
619 PM_POP(ICPLB_DATA10)
620 PM_POP(ICPLB_DATA9)
621 PM_POP(ICPLB_DATA8)
622 PM_POP(ICPLB_DATA7)
623 PM_POP(ICPLB_DATA6)
624 PM_POP(ICPLB_DATA5)
625 PM_POP(ICPLB_DATA4)
626 PM_POP(ICPLB_DATA3)
627 PM_POP(ICPLB_DATA2)
628 PM_POP(ICPLB_DATA1)
629 PM_POP(ICPLB_DATA0)
630 PM_POP(ICPLB_ADDR15)
631 PM_POP(ICPLB_ADDR14)
632 PM_POP(ICPLB_ADDR13)
633 PM_POP(ICPLB_ADDR12)
634 PM_POP(ICPLB_ADDR11)
635 PM_POP(ICPLB_ADDR10)
636 PM_POP(ICPLB_ADDR9)
637 PM_POP(ICPLB_ADDR8)
638 PM_POP(ICPLB_ADDR7)
639 PM_POP(ICPLB_ADDR6)
640 PM_POP(ICPLB_ADDR5)
641 PM_POP(ICPLB_ADDR4)
642 PM_POP(ICPLB_ADDR3)
643 PM_POP(ICPLB_ADDR2)
644 PM_POP(ICPLB_ADDR1)
645 PM_POP(ICPLB_ADDR0)
646 PM_POP(IMEM_CONTROL)
647 PM_POP(DCPLB_DATA15)
648 PM_POP(DCPLB_DATA14)
649 PM_POP(DCPLB_DATA13)
650 PM_POP(DCPLB_DATA12)
651 PM_POP(DCPLB_DATA11)
652 PM_POP(DCPLB_DATA10)
653 PM_POP(DCPLB_DATA9)
654 PM_POP(DCPLB_DATA8)
655 PM_POP(DCPLB_DATA7)
656 PM_POP(DCPLB_DATA6)
657 PM_POP(DCPLB_DATA5)
658 PM_POP(DCPLB_DATA4)
659 PM_POP(DCPLB_DATA3)
660 PM_POP(DCPLB_DATA2)
661 PM_POP(DCPLB_DATA1)
662 PM_POP(DCPLB_DATA0)
663 PM_POP(DCPLB_ADDR15)
664 PM_POP(DCPLB_ADDR14)
665 PM_POP(DCPLB_ADDR13)
666 PM_POP(DCPLB_ADDR12)
667 PM_POP(DCPLB_ADDR11)
668 PM_POP(DCPLB_ADDR10)
669 PM_POP(DCPLB_ADDR9)
670 PM_POP(DCPLB_ADDR8)
671 PM_POP(DCPLB_ADDR7)
672 PM_POP(DCPLB_ADDR6)
673 PM_POP(DCPLB_ADDR5)
674 PM_POP(DCPLB_ADDR4)
675 PM_POP(DCPLB_ADDR3)
676 PM_POP(DCPLB_ADDR2)
677 PM_POP(DCPLB_ADDR1)
678 PM_POP(DCPLB_ADDR0)
679 PM_POP(DMEM_CONTROL)
680
681 /* Restore System MMRs */
682
683 P0.H = hi(PLL_CTL);
684 P0.L = lo(PLL_CTL);
685 PM_SYS_POP16(SYSCR)
686
687#ifdef PORTCIO_FER
688 PM_SYS_POP16(PORTEIO_FER)
689 PM_SYS_POP16(PORTEIO)
690 PM_SYS_POP16(PORTEIO_INEN)
691 PM_SYS_POP16(PORTEIO_DIR)
692 PM_SYS_POP16(PORTDIO_FER)
693 PM_SYS_POP16(PORTDIO)
694 PM_SYS_POP16(PORTDIO_INEN)
695 PM_SYS_POP16(PORTDIO_DIR)
696 PM_SYS_POP16(PORTCIO_FER)
697 PM_SYS_POP16(PORTCIO)
698 PM_SYS_POP16(PORTCIO_INEN)
699 PM_SYS_POP16(PORTCIO_DIR)
700#endif
701
702#ifdef EBIU_FCTL
703 PM_SYS_POP(EBIU_FCTL)
704 PM_SYS_POP(EBIU_MODE)
705 PM_SYS_POP(EBIU_MBSCTL)
706#endif
707 PM_SYS_POP16(EBIU_AMGCTL)
708 PM_SYS_POP(EBIU_AMBCTL1)
709 PM_SYS_POP(EBIU_AMBCTL0)
710
711#ifdef PINT0_ASSIGN
712 PM_SYS_POP(PINT3_EDGE_SET)
713 PM_SYS_POP(PINT2_EDGE_SET)
714 PM_SYS_POP(PINT1_EDGE_SET)
715 PM_SYS_POP(PINT0_EDGE_SET)
716 PM_SYS_POP(PINT3_INVERT_SET)
717 PM_SYS_POP(PINT2_INVERT_SET)
718 PM_SYS_POP(PINT1_INVERT_SET)
719 PM_SYS_POP(PINT0_INVERT_SET)
720 PM_SYS_POP(PINT3_ASSIGN)
721 PM_SYS_POP(PINT2_ASSIGN)
722 PM_SYS_POP(PINT1_ASSIGN)
723 PM_SYS_POP(PINT0_ASSIGN)
724 PM_SYS_POP(PINT3_MASK_SET)
725 PM_SYS_POP(PINT2_MASK_SET)
726 PM_SYS_POP(PINT1_MASK_SET)
727 PM_SYS_POP(PINT0_MASK_SET)
728#endif
729
730#ifdef SIC_IWR2
731 PM_SYS_POP(SIC_IWR2)
732#endif
733#ifdef SIC_IWR1
734 PM_SYS_POP(SIC_IWR1)
735#endif
736#ifdef SIC_IWR0
737 PM_SYS_POP(SIC_IWR0)
738#endif
739#ifdef SIC_IWR
740 PM_SYS_POP(SIC_IWR)
741#endif
742
743#ifdef SIC_IAR8
744 PM_SYS_POP(SIC_IAR11)
745 PM_SYS_POP(SIC_IAR10)
746 PM_SYS_POP(SIC_IAR9)
747 PM_SYS_POP(SIC_IAR8)
748#endif
749#ifdef SIC_IAR7
750 PM_SYS_POP(SIC_IAR7)
751#endif
752#ifdef SIC_IAR6
753 PM_SYS_POP(SIC_IAR6)
754 PM_SYS_POP(SIC_IAR5)
755 PM_SYS_POP(SIC_IAR4)
756#endif
757#ifdef SIC_IAR3
758 PM_SYS_POP(SIC_IAR3)
759#endif
760#ifdef SIC_IAR0
761 PM_SYS_POP(SIC_IAR2)
762 PM_SYS_POP(SIC_IAR1)
763 PM_SYS_POP(SIC_IAR0)
764#endif
765#ifdef SIC_IMASK
766 PM_SYS_POP(SIC_IMASK)
767#endif
768#ifdef SIC_IMASK2
769 PM_SYS_POP(SIC_IMASK2)
770#endif
771#ifdef SIC_IMASK1
772 PM_SYS_POP(SIC_IMASK1)
773#endif
774#ifdef SIC_IMASK0
775 PM_SYS_POP(SIC_IMASK0)
776#endif
777 889
778 [--sp] = RETI; /* Clear Global Interrupt Disable */ 890 [--sp] = RETI; /* Clear Global Interrupt Disable */
779 SP += 4; 891 SP += 4;
780 892
781 RETS = [SP++];
782 ( R7:0, P5:0 ) = [SP++];
783 RTS; 893 RTS;
784ENDPROC(_do_hibernate) 894ENDPROC(_do_hibernate)
diff --git a/arch/blackfin/mach-common/head.S b/arch/blackfin/mach-common/head.S
index 76de5724c1e3..8b4d98854403 100644
--- a/arch/blackfin/mach-common/head.S
+++ b/arch/blackfin/mach-common/head.S
@@ -85,37 +85,25 @@ ENTRY(__start)
85 SSYNC; 85 SSYNC;
86 86
87 /* in case of double faults, save a few things */ 87 /* in case of double faults, save a few things */
88 p0.l = _init_retx; 88 p1.l = _initial_pda;
89 p0.h = _init_retx; 89 p1.h = _initial_pda;
90 R0 = RETX; 90 r4 = RETX;
91 [P0] = R0;
92
93#ifdef CONFIG_DEBUG_DOUBLEFAULT 91#ifdef CONFIG_DEBUG_DOUBLEFAULT
94 /* Only save these if we are storing them, 92 /* Only save these if we are storing them,
95 * This happens here, since L1 gets clobbered 93 * This happens here, since L1 gets clobbered
96 * below 94 * below
97 */ 95 */
98 GET_PDA(p0, r0); 96 GET_PDA(p0, r0);
99 r5 = [p0 + PDA_DF_RETX]; 97 r0 = [p0 + PDA_DF_RETX];
100 p1.l = _init_saved_retx; 98 r1 = [p0 + PDA_DF_DCPLB];
101 p1.h = _init_saved_retx; 99 r2 = [p0 + PDA_DF_ICPLB];
102 [p1] = r5; 100 r3 = [p0 + PDA_DF_SEQSTAT];
103 101 [p1 + PDA_INIT_DF_RETX] = r0;
104 r5 = [p0 + PDA_DF_DCPLB]; 102 [p1 + PDA_INIT_DF_DCPLB] = r1;
105 p1.l = _init_saved_dcplb_fault_addr; 103 [p1 + PDA_INIT_DF_ICPLB] = r2;
106 p1.h = _init_saved_dcplb_fault_addr; 104 [p1 + PDA_INIT_DF_SEQSTAT] = r3;
107 [p1] = r5;
108
109 r5 = [p0 + PDA_DF_ICPLB];
110 p1.l = _init_saved_icplb_fault_addr;
111 p1.h = _init_saved_icplb_fault_addr;
112 [p1] = r5;
113
114 r5 = [p0 + PDA_DF_SEQSTAT];
115 p1.l = _init_saved_seqstat;
116 p1.h = _init_saved_seqstat;
117 [p1] = r5;
118#endif 105#endif
106 [p1 + PDA_INIT_RETX] = r4;
119 107
120 /* Initialize stack pointer */ 108 /* Initialize stack pointer */
121 sp.l = _init_thread_union + THREAD_SIZE; 109 sp.l = _init_thread_union + THREAD_SIZE;
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index 1177369f9922..332dace6af34 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -444,7 +444,7 @@ static inline void bfin_set_irq_handler(unsigned irq, irq_flow_handler_t handle)
444static DECLARE_BITMAP(gpio_enabled, MAX_BLACKFIN_GPIOS); 444static DECLARE_BITMAP(gpio_enabled, MAX_BLACKFIN_GPIOS);
445extern void bfin_gpio_irq_prepare(unsigned gpio); 445extern void bfin_gpio_irq_prepare(unsigned gpio);
446 446
447#if !defined(CONFIG_BF54x) 447#if !BFIN_GPIO_PINT
448 448
449static void bfin_gpio_ack_irq(struct irq_data *d) 449static void bfin_gpio_ack_irq(struct irq_data *d)
450{ 450{
@@ -633,7 +633,7 @@ void bfin_demux_gpio_irq(unsigned int inta_irq,
633 bfin_demux_gpio_block(irq); 633 bfin_demux_gpio_block(irq);
634} 634}
635 635
636#else /* CONFIG_BF54x */ 636#else
637 637
638#define NR_PINT_SYS_IRQS 4 638#define NR_PINT_SYS_IRQS 4
639#define NR_PINT_BITS 32 639#define NR_PINT_BITS 32
@@ -647,24 +647,11 @@ void bfin_demux_gpio_irq(unsigned int inta_irq,
647static unsigned char irq2pint_lut[NR_PINTS]; 647static unsigned char irq2pint_lut[NR_PINTS];
648static unsigned char pint2irq_lut[NR_PINT_SYS_IRQS * NR_PINT_BITS]; 648static unsigned char pint2irq_lut[NR_PINT_SYS_IRQS * NR_PINT_BITS];
649 649
650struct pin_int_t { 650static struct bfin_pint_regs * const pint[NR_PINT_SYS_IRQS] = {
651 unsigned int mask_set; 651 (struct bfin_pint_regs *)PINT0_MASK_SET,
652 unsigned int mask_clear; 652 (struct bfin_pint_regs *)PINT1_MASK_SET,
653 unsigned int request; 653 (struct bfin_pint_regs *)PINT2_MASK_SET,
654 unsigned int assign; 654 (struct bfin_pint_regs *)PINT3_MASK_SET,
655 unsigned int edge_set;
656 unsigned int edge_clear;
657 unsigned int invert_set;
658 unsigned int invert_clear;
659 unsigned int pinstate;
660 unsigned int latch;
661};
662
663static struct pin_int_t *pint[NR_PINT_SYS_IRQS] = {
664 (struct pin_int_t *)PINT0_MASK_SET,
665 (struct pin_int_t *)PINT1_MASK_SET,
666 (struct pin_int_t *)PINT2_MASK_SET,
667 (struct pin_int_t *)PINT3_MASK_SET,
668}; 655};
669 656
670inline unsigned int get_irq_base(u32 bank, u8 bmap) 657inline unsigned int get_irq_base(u32 bank, u8 bmap)
@@ -981,7 +968,7 @@ int __init init_arch_irq(void)
981 968
982 local_irq_disable(); 969 local_irq_disable();
983 970
984#ifdef CONFIG_BF54x 971#if BFIN_GPIO_PINT
985# ifdef CONFIG_PINTx_REASSIGN 972# ifdef CONFIG_PINTx_REASSIGN
986 pint[0]->assign = CONFIG_PINT0_ASSIGN; 973 pint[0]->assign = CONFIG_PINT0_ASSIGN;
987 pint[1]->assign = CONFIG_PINT1_ASSIGN; 974 pint[1]->assign = CONFIG_PINT1_ASSIGN;
@@ -999,16 +986,16 @@ int __init init_arch_irq(void)
999 irq_set_chip(irq, &bfin_internal_irqchip); 986 irq_set_chip(irq, &bfin_internal_irqchip);
1000 987
1001 switch (irq) { 988 switch (irq) {
1002#if defined(BF537_FAMILY) 989#if BFIN_GPIO_PINT
1003 case IRQ_PH_INTA_MAC_RX:
1004 case IRQ_PF_INTA_PG_INTA:
1005#elif defined(BF533_FAMILY)
1006 case IRQ_PROG_INTA:
1007#elif defined(CONFIG_BF54x)
1008 case IRQ_PINT0: 990 case IRQ_PINT0:
1009 case IRQ_PINT1: 991 case IRQ_PINT1:
1010 case IRQ_PINT2: 992 case IRQ_PINT2:
1011 case IRQ_PINT3: 993 case IRQ_PINT3:
994#elif defined(BF537_FAMILY)
995 case IRQ_PH_INTA_MAC_RX:
996 case IRQ_PF_INTA_PG_INTA:
997#elif defined(BF533_FAMILY)
998 case IRQ_PROG_INTA:
1012#elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x) 999#elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
1013 case IRQ_PORTF_INTA: 1000 case IRQ_PORTF_INTA:
1014 case IRQ_PORTG_INTA: 1001 case IRQ_PORTG_INTA:
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 35e7e1eb0188..1c143a4de5f5 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -45,9 +45,7 @@ struct corelock_slot corelock __attribute__ ((__section__(".l2.bss")));
45unsigned long blackfin_iflush_l1_entry[NR_CPUS]; 45unsigned long blackfin_iflush_l1_entry[NR_CPUS];
46#endif 46#endif
47 47
48void __cpuinitdata *init_retx_coreb, *init_saved_retx_coreb, 48struct blackfin_initial_pda __cpuinitdata initial_pda_coreb;
49 *init_saved_seqstat_coreb, *init_saved_icplb_fault_addr_coreb,
50 *init_saved_dcplb_fault_addr_coreb;
51 49
52#define BFIN_IPI_RESCHEDULE 0 50#define BFIN_IPI_RESCHEDULE 0
53#define BFIN_IPI_CALL_FUNC 1 51#define BFIN_IPI_CALL_FUNC 1
@@ -369,13 +367,16 @@ void __cpuinit secondary_start_kernel(void)
369 if (_bfin_swrst & SWRST_DBL_FAULT_B) { 367 if (_bfin_swrst & SWRST_DBL_FAULT_B) {
370 printk(KERN_EMERG "CoreB Recovering from DOUBLE FAULT event\n"); 368 printk(KERN_EMERG "CoreB Recovering from DOUBLE FAULT event\n");
371#ifdef CONFIG_DEBUG_DOUBLEFAULT 369#ifdef CONFIG_DEBUG_DOUBLEFAULT
372 printk(KERN_EMERG " While handling exception (EXCAUSE = 0x%x) at %pF\n", 370 printk(KERN_EMERG " While handling exception (EXCAUSE = %#x) at %pF\n",
373 (int)init_saved_seqstat_coreb & SEQSTAT_EXCAUSE, init_saved_retx_coreb); 371 initial_pda_coreb.seqstat_doublefault & SEQSTAT_EXCAUSE,
374 printk(KERN_NOTICE " DCPLB_FAULT_ADDR: %pF\n", init_saved_dcplb_fault_addr_coreb); 372 initial_pda_coreb.retx_doublefault);
375 printk(KERN_NOTICE " ICPLB_FAULT_ADDR: %pF\n", init_saved_icplb_fault_addr_coreb); 373 printk(KERN_NOTICE " DCPLB_FAULT_ADDR: %pF\n",
374 initial_pda_coreb.dcplb_doublefault_addr);
375 printk(KERN_NOTICE " ICPLB_FAULT_ADDR: %pF\n",
376 initial_pda_coreb.icplb_doublefault_addr);
376#endif 377#endif
377 printk(KERN_NOTICE " The instruction at %pF caused a double exception\n", 378 printk(KERN_NOTICE " The instruction at %pF caused a double exception\n",
378 init_retx_coreb); 379 initial_pda_coreb.retx);
379 } 380 }
380 381
381 /* 382 /*
diff --git a/arch/h8300/Kconfig.cpu b/arch/h8300/Kconfig.cpu
index d236ab4232ca..15c22286ae79 100644
--- a/arch/h8300/Kconfig.cpu
+++ b/arch/h8300/Kconfig.cpu
@@ -162,9 +162,7 @@ config H8300_TPU_CH
162 int "TPU channel" 162 int "TPU channel"
163 depends on H8300_TPU 163 depends on H8300_TPU
164 164
165config PREEMPT 165source "kernel/Kconfig.preempt"
166 bool "Preemptible Kernel"
167 default n
168 166
169source "mm/Kconfig" 167source "mm/Kconfig"
170 168
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 38280ef4a2af..137b277f7e56 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -101,6 +101,9 @@ config GENERIC_IOMAP
101 bool 101 bool
102 default y 102 default y
103 103
104config ARCH_CLOCKSOURCE_DATA
105 def_bool y
106
104config SCHED_OMIT_FRAME_POINTER 107config SCHED_OMIT_FRAME_POINTER
105 bool 108 bool
106 default y 109 default y
@@ -627,27 +630,6 @@ source "drivers/pci/hotplug/Kconfig"
627 630
628source "drivers/pcmcia/Kconfig" 631source "drivers/pcmcia/Kconfig"
629 632
630config DMAR
631 bool "Support for DMA Remapping Devices (EXPERIMENTAL)"
632 depends on IA64_GENERIC && ACPI && EXPERIMENTAL
633 help
634 DMA remapping (DMAR) devices support enables independent address
635 translations for Direct Memory Access (DMA) from devices.
636 These DMA remapping devices are reported via ACPI tables
637 and include PCI device scope covered by these DMA
638 remapping devices.
639
640config DMAR_DEFAULT_ON
641 def_bool y
642 prompt "Enable DMA Remapping Devices by default"
643 depends on DMAR
644 help
645 Selecting this option will enable a DMAR device at boot time if
646 one is found. If this option is not selected, DMAR support can
647 be enabled by passing intel_iommu=on to the kernel. It is
648 recommended you say N here while the DMAR code remains
649 experimental.
650
651endmenu 633endmenu
652 634
653endif 635endif
@@ -681,6 +663,3 @@ source "lib/Kconfig"
681 663
682config IOMMU_HELPER 664config IOMMU_HELPER
683 def_bool (IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB || IA64_GENERIC || SWIOTLB) 665 def_bool (IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB || IA64_GENERIC || SWIOTLB)
684
685config IOMMU_API
686 def_bool (DMAR)
diff --git a/arch/ia64/include/asm/clocksource.h b/arch/ia64/include/asm/clocksource.h
new file mode 100644
index 000000000000..5c8596e4cb02
--- /dev/null
+++ b/arch/ia64/include/asm/clocksource.h
@@ -0,0 +1,10 @@
1/* IA64-specific clocksource additions */
2
3#ifndef _ASM_IA64_CLOCKSOURCE_H
4#define _ASM_IA64_CLOCKSOURCE_H
5
6struct arch_clocksource_data {
7 void *fsys_mmio; /* used by fsyscall asm code */
8};
9
10#endif /* _ASM_IA64_CLOCKSOURCE_H */
diff --git a/arch/ia64/kernel/cyclone.c b/arch/ia64/kernel/cyclone.c
index f64097b5118a..4826ff957a3d 100644
--- a/arch/ia64/kernel/cyclone.c
+++ b/arch/ia64/kernel/cyclone.c
@@ -115,7 +115,7 @@ int __init init_cyclone_clock(void)
115 } 115 }
116 /* initialize last tick */ 116 /* initialize last tick */
117 cyclone_mc = cyclone_timer; 117 cyclone_mc = cyclone_timer;
118 clocksource_cyclone.fsys_mmio = cyclone_timer; 118 clocksource_cyclone.archdata.fsys_mmio = cyclone_timer;
119 clocksource_register_hz(&clocksource_cyclone, CYCLONE_TIMER_FREQ); 119 clocksource_register_hz(&clocksource_cyclone, CYCLONE_TIMER_FREQ);
120 120
121 return 0; 121 return 0;
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 85118dfe9bb5..43920de425f1 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -468,7 +468,7 @@ void update_vsyscall(struct timespec *wall, struct timespec *wtm,
468 fsyscall_gtod_data.clk_mask = c->mask; 468 fsyscall_gtod_data.clk_mask = c->mask;
469 fsyscall_gtod_data.clk_mult = mult; 469 fsyscall_gtod_data.clk_mult = mult;
470 fsyscall_gtod_data.clk_shift = c->shift; 470 fsyscall_gtod_data.clk_shift = c->shift;
471 fsyscall_gtod_data.clk_fsys_mmio = c->fsys_mmio; 471 fsyscall_gtod_data.clk_fsys_mmio = c->archdata.fsys_mmio;
472 fsyscall_gtod_data.clk_cycle_last = c->cycle_last; 472 fsyscall_gtod_data.clk_cycle_last = c->cycle_last;
473 473
474 /* copy kernel time structures */ 474 /* copy kernel time structures */
diff --git a/arch/ia64/kvm/Kconfig b/arch/ia64/kvm/Kconfig
index fa4d1e59deb0..9806e55f91be 100644
--- a/arch/ia64/kvm/Kconfig
+++ b/arch/ia64/kvm/Kconfig
@@ -49,6 +49,5 @@ config KVM_INTEL
49 extensions. 49 extensions.
50 50
51source drivers/vhost/Kconfig 51source drivers/vhost/Kconfig
52source drivers/virtio/Kconfig
53 52
54endif # VIRTUALIZATION 53endif # VIRTUALIZATION
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
index 81a1f4e6bcd8..485c42d97e83 100644
--- a/arch/ia64/sn/kernel/irq.c
+++ b/arch/ia64/sn/kernel/irq.c
@@ -112,8 +112,6 @@ static void sn_ack_irq(struct irq_data *data)
112 irq_move_irq(data); 112 irq_move_irq(data);
113} 113}
114 114
115static void sn_irq_info_free(struct rcu_head *head);
116
117struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info, 115struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info,
118 nasid_t nasid, int slice) 116 nasid_t nasid, int slice)
119{ 117{
@@ -177,7 +175,7 @@ struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info,
177 spin_lock(&sn_irq_info_lock); 175 spin_lock(&sn_irq_info_lock);
178 list_replace_rcu(&sn_irq_info->list, &new_irq_info->list); 176 list_replace_rcu(&sn_irq_info->list, &new_irq_info->list);
179 spin_unlock(&sn_irq_info_lock); 177 spin_unlock(&sn_irq_info_lock);
180 call_rcu(&sn_irq_info->rcu, sn_irq_info_free); 178 kfree_rcu(sn_irq_info, rcu);
181 179
182 180
183finish_up: 181finish_up:
@@ -338,14 +336,6 @@ static void unregister_intr_pda(struct sn_irq_info *sn_irq_info)
338 rcu_read_unlock(); 336 rcu_read_unlock();
339} 337}
340 338
341static void sn_irq_info_free(struct rcu_head *head)
342{
343 struct sn_irq_info *sn_irq_info;
344
345 sn_irq_info = container_of(head, struct sn_irq_info, rcu);
346 kfree(sn_irq_info);
347}
348
349void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info) 339void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info)
350{ 340{
351 nasid_t nasid = sn_irq_info->irq_nasid; 341 nasid_t nasid = sn_irq_info->irq_nasid;
@@ -399,7 +389,7 @@ void sn_irq_unfixup(struct pci_dev *pci_dev)
399 spin_unlock(&sn_irq_info_lock); 389 spin_unlock(&sn_irq_info_lock);
400 if (list_empty(sn_irq_lh[sn_irq_info->irq_irq])) 390 if (list_empty(sn_irq_lh[sn_irq_info->irq_irq]))
401 free_irq_vector(sn_irq_info->irq_irq); 391 free_irq_vector(sn_irq_info->irq_irq);
402 call_rcu(&sn_irq_info->rcu, sn_irq_info_free); 392 kfree_rcu(sn_irq_info, rcu);
403 pci_dev_put(pci_dev); 393 pci_dev_put(pci_dev);
404 394
405} 395}
diff --git a/arch/ia64/sn/kernel/sn2/timer.c b/arch/ia64/sn/kernel/sn2/timer.c
index c34efda122e1..0f8844e49363 100644
--- a/arch/ia64/sn/kernel/sn2/timer.c
+++ b/arch/ia64/sn/kernel/sn2/timer.c
@@ -54,7 +54,7 @@ ia64_sn_udelay (unsigned long usecs)
54 54
55void __init sn_timer_init(void) 55void __init sn_timer_init(void)
56{ 56{
57 clocksource_sn2.fsys_mmio = RTC_COUNTER_ADDR; 57 clocksource_sn2.archdata.fsys_mmio = RTC_COUNTER_ADDR;
58 clocksource_register_hz(&clocksource_sn2, sn_rtc_cycles_per_second); 58 clocksource_register_hz(&clocksource_sn2, sn_rtc_cycles_per_second);
59 59
60 ia64_udelay = &ia64_sn_udelay; 60 ia64_udelay = &ia64_sn_udelay;
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index 85b44e858225..b92b9445255d 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -268,17 +268,7 @@ config SCHED_OMIT_FRAME_POINTER
268 bool 268 bool
269 default y 269 default y
270 270
271config PREEMPT 271source "kernel/Kconfig.preempt"
272 bool "Preemptible Kernel"
273 help
274 This option reduces the latency of the kernel when reacting to
275 real-time or interactive events by allowing a low priority process to
276 be preempted even if it is in kernel mode executing a system call.
277 This allows applications to run more reliably even when the system is
278 under load.
279
280 Say Y here if you are building a kernel for a desktop, embedded
281 or real-time system. Say N if you are unsure.
282 272
283config SMP 273config SMP
284 bool "Symmetric multi-processing support" 274 bool "Symmetric multi-processing support"
diff --git a/arch/m68k/emu/nfeth.c b/arch/m68k/emu/nfeth.c
index 8b6e201b2c20..c5748bb4ea71 100644
--- a/arch/m68k/emu/nfeth.c
+++ b/arch/m68k/emu/nfeth.c
@@ -16,6 +16,7 @@
16 16
17#include <linux/netdevice.h> 17#include <linux/netdevice.h>
18#include <linux/etherdevice.h> 18#include <linux/etherdevice.h>
19#include <linux/interrupt.h>
19#include <linux/module.h> 20#include <linux/module.h>
20#include <asm/natfeat.h> 21#include <asm/natfeat.h>
21#include <asm/virtconvert.h> 22#include <asm/virtconvert.h>
@@ -204,7 +205,6 @@ static struct net_device * __init nfeth_probe(int unit)
204 dev->irq = nfEtherIRQ; 205 dev->irq = nfEtherIRQ;
205 dev->netdev_ops = &nfeth_netdev_ops; 206 dev->netdev_ops = &nfeth_netdev_ops;
206 207
207 dev->flags |= NETIF_F_NO_CSUM;
208 memcpy(dev->dev_addr, mac, ETH_ALEN); 208 memcpy(dev->dev_addr, mac, ETH_ALEN);
209 209
210 priv = netdev_priv(dev); 210 priv = netdev_priv(dev);
diff --git a/arch/microblaze/include/asm/pci-bridge.h b/arch/microblaze/include/asm/pci-bridge.h
index 746df91e5796..242be57a319c 100644
--- a/arch/microblaze/include/asm/pci-bridge.h
+++ b/arch/microblaze/include/asm/pci-bridge.h
@@ -19,9 +19,6 @@ enum {
19 */ 19 */
20 PCI_REASSIGN_ALL_RSRC = 0x00000001, 20 PCI_REASSIGN_ALL_RSRC = 0x00000001,
21 21
22 /* Re-assign all bus numbers */
23 PCI_REASSIGN_ALL_BUS = 0x00000002,
24
25 /* Do not try to assign, just use existing setup */ 22 /* Do not try to assign, just use existing setup */
26 PCI_PROBE_ONLY = 0x00000004, 23 PCI_PROBE_ONLY = 0x00000004,
27 24
@@ -110,16 +107,6 @@ static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus)
110 return bus->sysdata; 107 return bus->sysdata;
111} 108}
112 109
113static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
114{
115 struct pci_controller *host;
116
117 if (bus->self)
118 return pci_device_to_OF_node(bus->self);
119 host = pci_bus_to_host(bus);
120 return host ? host->dn : NULL;
121}
122
123static inline int isa_vaddr_is_ioport(void __iomem *address) 110static inline int isa_vaddr_is_ioport(void __iomem *address)
124{ 111{
125 /* No specific ISA handling on ppc32 at this stage, it 112 /* No specific ISA handling on ppc32 at this stage, it
diff --git a/arch/microblaze/include/asm/pci.h b/arch/microblaze/include/asm/pci.h
index ba65cf472544..1dd9d6b1e275 100644
--- a/arch/microblaze/include/asm/pci.h
+++ b/arch/microblaze/include/asm/pci.h
@@ -40,8 +40,7 @@ struct pci_dev;
40 * Set this to 1 if you want the kernel to re-assign all PCI 40 * Set this to 1 if you want the kernel to re-assign all PCI
41 * bus numbers (don't do that on ppc64 yet !) 41 * bus numbers (don't do that on ppc64 yet !)
42 */ 42 */
43#define pcibios_assign_all_busses() \ 43#define pcibios_assign_all_busses() 0
44 (pci_has_flag(PCI_REASSIGN_ALL_BUS))
45 44
46static inline void pcibios_set_master(struct pci_dev *dev) 45static inline void pcibios_set_master(struct pci_dev *dev)
47{ 46{
diff --git a/arch/microblaze/include/asm/prom.h b/arch/microblaze/include/asm/prom.h
index d0890d36ef61..9bd01ecb00d6 100644
--- a/arch/microblaze/include/asm/prom.h
+++ b/arch/microblaze/include/asm/prom.h
@@ -29,21 +29,6 @@
29extern int early_uartlite_console(void); 29extern int early_uartlite_console(void);
30extern int early_uart16550_console(void); 30extern int early_uart16550_console(void);
31 31
32#ifdef CONFIG_PCI
33/*
34 * PCI <-> OF matching functions
35 * (XXX should these be here?)
36 */
37struct pci_bus;
38struct pci_dev;
39extern int pci_device_from_OF_node(struct device_node *node,
40 u8 *bus, u8 *devfn);
41extern struct device_node *pci_busdev_to_OF_node(struct pci_bus *bus,
42 int devfn);
43extern struct device_node *pci_device_to_OF_node(struct pci_dev *dev);
44extern void pci_create_OF_bus_map(void);
45#endif
46
47/* 32/*
48 * OF address retreival & translation 33 * OF address retreival & translation
49 */ 34 */
diff --git a/arch/microblaze/pci/Makefile b/arch/microblaze/pci/Makefile
index 9889cc2e1294..d1114fbd4780 100644
--- a/arch/microblaze/pci/Makefile
+++ b/arch/microblaze/pci/Makefile
@@ -2,5 +2,5 @@
2# Makefile 2# Makefile
3# 3#
4 4
5obj-$(CONFIG_PCI) += pci_32.o pci-common.o indirect_pci.o iomap.o 5obj-$(CONFIG_PCI) += pci-common.o indirect_pci.o iomap.o
6obj-$(CONFIG_PCI_XILINX) += xilinx_pci.o 6obj-$(CONFIG_PCI_XILINX) += xilinx_pci.o
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c
index 53599067d2f9..041b1d86d75b 100644
--- a/arch/microblaze/pci/pci-common.c
+++ b/arch/microblaze/pci/pci-common.c
@@ -50,6 +50,11 @@ unsigned int pci_flags;
50 50
51static struct dma_map_ops *pci_dma_ops = &dma_direct_ops; 51static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
52 52
53unsigned long isa_io_base;
54unsigned long pci_dram_offset;
55static int pci_bus_count;
56
57
53void set_pci_dma_ops(struct dma_map_ops *dma_ops) 58void set_pci_dma_ops(struct dma_map_ops *dma_ops)
54{ 59{
55 pci_dma_ops = dma_ops; 60 pci_dma_ops = dma_ops;
@@ -1558,6 +1563,112 @@ void __devinit pcibios_setup_phb_resources(struct pci_controller *hose)
1558 (unsigned long)hose->io_base_virt - _IO_BASE); 1563 (unsigned long)hose->io_base_virt - _IO_BASE);
1559} 1564}
1560 1565
1566struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
1567{
1568 struct pci_controller *hose = bus->sysdata;
1569
1570 return of_node_get(hose->dn);
1571}
1572
1573static void __devinit pcibios_scan_phb(struct pci_controller *hose)
1574{
1575 struct pci_bus *bus;
1576 struct device_node *node = hose->dn;
1577 unsigned long io_offset;
1578 struct resource *res = &hose->io_resource;
1579
1580 pr_debug("PCI: Scanning PHB %s\n",
1581 node ? node->full_name : "<NO NAME>");
1582
1583 /* Create an empty bus for the toplevel */
1584 bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, hose);
1585 if (bus == NULL) {
1586 printk(KERN_ERR "Failed to create bus for PCI domain %04x\n",
1587 hose->global_number);
1588 return;
1589 }
1590 bus->secondary = hose->first_busno;
1591 hose->bus = bus;
1592
1593 /* Fixup IO space offset */
1594 io_offset = (unsigned long)hose->io_base_virt - isa_io_base;
1595 res->start = (res->start + io_offset) & 0xffffffffu;
1596 res->end = (res->end + io_offset) & 0xffffffffu;
1597
1598 /* Wire up PHB bus resources */
1599 pcibios_setup_phb_resources(hose);
1600
1601 /* Scan children */
1602 hose->last_busno = bus->subordinate = pci_scan_child_bus(bus);
1603}
1604
1605static int __init pcibios_init(void)
1606{
1607 struct pci_controller *hose, *tmp;
1608 int next_busno = 0;
1609
1610 printk(KERN_INFO "PCI: Probing PCI hardware\n");
1611
1612 /* Scan all of the recorded PCI controllers. */
1613 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
1614 hose->last_busno = 0xff;
1615 pcibios_scan_phb(hose);
1616 printk(KERN_INFO "calling pci_bus_add_devices()\n");
1617 pci_bus_add_devices(hose->bus);
1618 if (next_busno <= hose->last_busno)
1619 next_busno = hose->last_busno + 1;
1620 }
1621 pci_bus_count = next_busno;
1622
1623 /* Call common code to handle resource allocation */
1624 pcibios_resource_survey();
1625
1626 return 0;
1627}
1628
1629subsys_initcall(pcibios_init);
1630
1631static struct pci_controller *pci_bus_to_hose(int bus)
1632{
1633 struct pci_controller *hose, *tmp;
1634
1635 list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
1636 if (bus >= hose->first_busno && bus <= hose->last_busno)
1637 return hose;
1638 return NULL;
1639}
1640
1641/* Provide information on locations of various I/O regions in physical
1642 * memory. Do this on a per-card basis so that we choose the right
1643 * root bridge.
1644 * Note that the returned IO or memory base is a physical address
1645 */
1646
1647long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn)
1648{
1649 struct pci_controller *hose;
1650 long result = -EOPNOTSUPP;
1651
1652 hose = pci_bus_to_hose(bus);
1653 if (!hose)
1654 return -ENODEV;
1655
1656 switch (which) {
1657 case IOBASE_BRIDGE_NUMBER:
1658 return (long)hose->first_busno;
1659 case IOBASE_MEMORY:
1660 return (long)hose->pci_mem_offset;
1661 case IOBASE_IO:
1662 return (long)hose->io_base_phys;
1663 case IOBASE_ISA_IO:
1664 return (long)isa_io_base;
1665 case IOBASE_ISA_MEM:
1666 return (long)isa_mem_base;
1667 }
1668
1669 return result;
1670}
1671
1561/* 1672/*
1562 * Null PCI config access functions, for the case when we can't 1673 * Null PCI config access functions, for the case when we can't
1563 * find a hose. 1674 * find a hose.
@@ -1626,3 +1737,4 @@ int early_find_capability(struct pci_controller *hose, int bus, int devfn,
1626{ 1737{
1627 return pci_bus_find_capability(fake_pci_bus(hose, bus), devfn, cap); 1738 return pci_bus_find_capability(fake_pci_bus(hose, bus), devfn, cap);
1628} 1739}
1740
diff --git a/arch/microblaze/pci/pci_32.c b/arch/microblaze/pci/pci_32.c
deleted file mode 100644
index 92728a6cfd80..000000000000
--- a/arch/microblaze/pci/pci_32.c
+++ /dev/null
@@ -1,432 +0,0 @@
1/*
2 * Common pmac/prep/chrp pci routines. -- Cort
3 */
4
5#include <linux/kernel.h>
6#include <linux/pci.h>
7#include <linux/delay.h>
8#include <linux/string.h>
9#include <linux/init.h>
10#include <linux/capability.h>
11#include <linux/sched.h>
12#include <linux/errno.h>
13#include <linux/bootmem.h>
14#include <linux/irq.h>
15#include <linux/list.h>
16#include <linux/of.h>
17#include <linux/slab.h>
18
19#include <asm/processor.h>
20#include <asm/io.h>
21#include <asm/prom.h>
22#include <asm/sections.h>
23#include <asm/pci-bridge.h>
24#include <asm/byteorder.h>
25#include <asm/uaccess.h>
26
27#undef DEBUG
28
29unsigned long isa_io_base;
30unsigned long pci_dram_offset;
31int pcibios_assign_bus_offset = 1;
32
33static u8 *pci_to_OF_bus_map;
34
35/* By default, we don't re-assign bus numbers. We do this only on
36 * some pmacs
37 */
38static int pci_assign_all_buses;
39
40static int pci_bus_count;
41
42/*
43 * Functions below are used on OpenFirmware machines.
44 */
45static void
46make_one_node_map(struct device_node *node, u8 pci_bus)
47{
48 const int *bus_range;
49 int len;
50
51 if (pci_bus >= pci_bus_count)
52 return;
53 bus_range = of_get_property(node, "bus-range", &len);
54 if (bus_range == NULL || len < 2 * sizeof(int)) {
55 printk(KERN_WARNING "Can't get bus-range for %s, "
56 "assuming it starts at 0\n", node->full_name);
57 pci_to_OF_bus_map[pci_bus] = 0;
58 } else
59 pci_to_OF_bus_map[pci_bus] = bus_range[0];
60
61 for_each_child_of_node(node, node) {
62 struct pci_dev *dev;
63 const unsigned int *class_code, *reg;
64
65 class_code = of_get_property(node, "class-code", NULL);
66 if (!class_code ||
67 ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
68 (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS))
69 continue;
70 reg = of_get_property(node, "reg", NULL);
71 if (!reg)
72 continue;
73 dev = pci_get_bus_and_slot(pci_bus, ((reg[0] >> 8) & 0xff));
74 if (!dev || !dev->subordinate) {
75 pci_dev_put(dev);
76 continue;
77 }
78 make_one_node_map(node, dev->subordinate->number);
79 pci_dev_put(dev);
80 }
81}
82
83void
84pcibios_make_OF_bus_map(void)
85{
86 int i;
87 struct pci_controller *hose, *tmp;
88 struct property *map_prop;
89 struct device_node *dn;
90
91 pci_to_OF_bus_map = kmalloc(pci_bus_count, GFP_KERNEL);
92 if (!pci_to_OF_bus_map) {
93 printk(KERN_ERR "Can't allocate OF bus map !\n");
94 return;
95 }
96
97 /* We fill the bus map with invalid values, that helps
98 * debugging.
99 */
100 for (i = 0; i < pci_bus_count; i++)
101 pci_to_OF_bus_map[i] = 0xff;
102
103 /* For each hose, we begin searching bridges */
104 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
105 struct device_node *node = hose->dn;
106
107 if (!node)
108 continue;
109 make_one_node_map(node, hose->first_busno);
110 }
111 dn = of_find_node_by_path("/");
112 map_prop = of_find_property(dn, "pci-OF-bus-map", NULL);
113 if (map_prop) {
114 BUG_ON(pci_bus_count > map_prop->length);
115 memcpy(map_prop->value, pci_to_OF_bus_map, pci_bus_count);
116 }
117 of_node_put(dn);
118#ifdef DEBUG
119 printk(KERN_INFO "PCI->OF bus map:\n");
120 for (i = 0; i < pci_bus_count; i++) {
121 if (pci_to_OF_bus_map[i] == 0xff)
122 continue;
123 printk(KERN_INFO "%d -> %d\n", i, pci_to_OF_bus_map[i]);
124 }
125#endif
126}
127
128typedef int (*pci_OF_scan_iterator)(struct device_node *node, void *data);
129
130static struct device_node *scan_OF_pci_childs(struct device_node *parent,
131 pci_OF_scan_iterator filter, void *data)
132{
133 struct device_node *node;
134 struct device_node *sub_node;
135
136 for_each_child_of_node(parent, node) {
137 const unsigned int *class_code;
138
139 if (filter(node, data)) {
140 of_node_put(node);
141 return node;
142 }
143
144 /* For PCI<->PCI bridges or CardBus bridges, we go down
145 * Note: some OFs create a parent node "multifunc-device" as
146 * a fake root for all functions of a multi-function device,
147 * we go down them as well.
148 */
149 class_code = of_get_property(node, "class-code", NULL);
150 if ((!class_code ||
151 ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
152 (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS)) &&
153 strcmp(node->name, "multifunc-device"))
154 continue;
155 sub_node = scan_OF_pci_childs(node, filter, data);
156 if (sub_node) {
157 of_node_put(node);
158 return sub_node;
159 }
160 }
161 return NULL;
162}
163
164static struct device_node *scan_OF_for_pci_dev(struct device_node *parent,
165 unsigned int devfn)
166{
167 struct device_node *np, *cnp;
168 const u32 *reg;
169 unsigned int psize;
170
171 for_each_child_of_node(parent, np) {
172 reg = of_get_property(np, "reg", &psize);
173 if (reg && psize >= 4 && ((reg[0] >> 8) & 0xff) == devfn)
174 return np;
175
176 /* Note: some OFs create a parent node "multifunc-device" as
177 * a fake root for all functions of a multi-function device,
178 * we go down them as well. */
179 if (!strcmp(np->name, "multifunc-device")) {
180 cnp = scan_OF_for_pci_dev(np, devfn);
181 if (cnp)
182 return cnp;
183 }
184 }
185 return NULL;
186}
187
188
189static struct device_node *scan_OF_for_pci_bus(struct pci_bus *bus)
190{
191 struct device_node *parent, *np;
192
193 /* Are we a root bus ? */
194 if (bus->self == NULL || bus->parent == NULL) {
195 struct pci_controller *hose = pci_bus_to_host(bus);
196 if (hose == NULL)
197 return NULL;
198 return of_node_get(hose->dn);
199 }
200
201 /* not a root bus, we need to get our parent */
202 parent = scan_OF_for_pci_bus(bus->parent);
203 if (parent == NULL)
204 return NULL;
205
206 /* now iterate for children for a match */
207 np = scan_OF_for_pci_dev(parent, bus->self->devfn);
208 of_node_put(parent);
209
210 return np;
211}
212
213/*
214 * Scans the OF tree for a device node matching a PCI device
215 */
216struct device_node *
217pci_busdev_to_OF_node(struct pci_bus *bus, int devfn)
218{
219 struct device_node *parent, *np;
220
221 pr_debug("pci_busdev_to_OF_node(%d,0x%x)\n", bus->number, devfn);
222 parent = scan_OF_for_pci_bus(bus);
223 if (parent == NULL)
224 return NULL;
225 pr_debug(" parent is %s\n", parent ? parent->full_name : "<NULL>");
226 np = scan_OF_for_pci_dev(parent, devfn);
227 of_node_put(parent);
228 pr_debug(" result is %s\n", np ? np->full_name : "<NULL>");
229
230 /* XXX most callers don't release the returned node
231 * mostly because ppc64 doesn't increase the refcount,
232 * we need to fix that.
233 */
234 return np;
235}
236EXPORT_SYMBOL(pci_busdev_to_OF_node);
237
238struct device_node*
239pci_device_to_OF_node(struct pci_dev *dev)
240{
241 return pci_busdev_to_OF_node(dev->bus, dev->devfn);
242}
243EXPORT_SYMBOL(pci_device_to_OF_node);
244
245static int
246find_OF_pci_device_filter(struct device_node *node, void *data)
247{
248 return ((void *)node == data);
249}
250
251/*
252 * Returns the PCI device matching a given OF node
253 */
254int
255pci_device_from_OF_node(struct device_node *node, u8 *bus, u8 *devfn)
256{
257 const unsigned int *reg;
258 struct pci_controller *hose;
259 struct pci_dev *dev = NULL;
260
261 /* Make sure it's really a PCI device */
262 hose = pci_find_hose_for_OF_device(node);
263 if (!hose || !hose->dn)
264 return -ENODEV;
265 if (!scan_OF_pci_childs(hose->dn,
266 find_OF_pci_device_filter, (void *)node))
267 return -ENODEV;
268 reg = of_get_property(node, "reg", NULL);
269 if (!reg)
270 return -ENODEV;
271 *bus = (reg[0] >> 16) & 0xff;
272 *devfn = ((reg[0] >> 8) & 0xff);
273
274 /* Ok, here we need some tweak. If we have already renumbered
275 * all busses, we can't rely on the OF bus number any more.
276 * the pci_to_OF_bus_map is not enough as several PCI busses
277 * may match the same OF bus number.
278 */
279 if (!pci_to_OF_bus_map)
280 return 0;
281
282 for_each_pci_dev(dev)
283 if (pci_to_OF_bus_map[dev->bus->number] == *bus &&
284 dev->devfn == *devfn) {
285 *bus = dev->bus->number;
286 pci_dev_put(dev);
287 return 0;
288 }
289
290 return -ENODEV;
291}
292EXPORT_SYMBOL(pci_device_from_OF_node);
293
294/* We create the "pci-OF-bus-map" property now so it appears in the
295 * /proc device tree
296 */
297void __init
298pci_create_OF_bus_map(void)
299{
300 struct property *of_prop;
301 struct device_node *dn;
302
303 of_prop = (struct property *) alloc_bootmem(sizeof(struct property) + \
304 256);
305 if (!of_prop)
306 return;
307 dn = of_find_node_by_path("/");
308 if (dn) {
309 memset(of_prop, -1, sizeof(struct property) + 256);
310 of_prop->name = "pci-OF-bus-map";
311 of_prop->length = 256;
312 of_prop->value = &of_prop[1];
313 prom_add_property(dn, of_prop);
314 of_node_put(dn);
315 }
316}
317
318static void __devinit pcibios_scan_phb(struct pci_controller *hose)
319{
320 struct pci_bus *bus;
321 struct device_node *node = hose->dn;
322 unsigned long io_offset;
323 struct resource *res = &hose->io_resource;
324
325 pr_debug("PCI: Scanning PHB %s\n",
326 node ? node->full_name : "<NO NAME>");
327
328 /* Create an empty bus for the toplevel */
329 bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, hose);
330 if (bus == NULL) {
331 printk(KERN_ERR "Failed to create bus for PCI domain %04x\n",
332 hose->global_number);
333 return;
334 }
335 bus.dev->of_node = of_node_get(node);
336 bus->secondary = hose->first_busno;
337 hose->bus = bus;
338
339 /* Fixup IO space offset */
340 io_offset = (unsigned long)hose->io_base_virt - isa_io_base;
341 res->start = (res->start + io_offset) & 0xffffffffu;
342 res->end = (res->end + io_offset) & 0xffffffffu;
343
344 /* Wire up PHB bus resources */
345 pcibios_setup_phb_resources(hose);
346
347 /* Scan children */
348 hose->last_busno = bus->subordinate = pci_scan_child_bus(bus);
349}
350
351static int __init pcibios_init(void)
352{
353 struct pci_controller *hose, *tmp;
354 int next_busno = 0;
355
356 printk(KERN_INFO "PCI: Probing PCI hardware\n");
357
358 if (pci_flags & PCI_REASSIGN_ALL_BUS) {
359 printk(KERN_INFO "setting pci_asign_all_busses\n");
360 pci_assign_all_buses = 1;
361 }
362
363 /* Scan all of the recorded PCI controllers. */
364 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
365 if (pci_assign_all_buses)
366 hose->first_busno = next_busno;
367 hose->last_busno = 0xff;
368 pcibios_scan_phb(hose);
369 printk(KERN_INFO "calling pci_bus_add_devices()\n");
370 pci_bus_add_devices(hose->bus);
371 if (pci_assign_all_buses || next_busno <= hose->last_busno)
372 next_busno = hose->last_busno + \
373 pcibios_assign_bus_offset;
374 }
375 pci_bus_count = next_busno;
376
377 /* OpenFirmware based machines need a map of OF bus
378 * numbers vs. kernel bus numbers since we may have to
379 * remap them.
380 */
381 if (pci_assign_all_buses)
382 pcibios_make_OF_bus_map();
383
384 /* Call common code to handle resource allocation */
385 pcibios_resource_survey();
386
387 return 0;
388}
389
390subsys_initcall(pcibios_init);
391
392static struct pci_controller*
393pci_bus_to_hose(int bus)
394{
395 struct pci_controller *hose, *tmp;
396
397 list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
398 if (bus >= hose->first_busno && bus <= hose->last_busno)
399 return hose;
400 return NULL;
401}
402
403/* Provide information on locations of various I/O regions in physical
404 * memory. Do this on a per-card basis so that we choose the right
405 * root bridge.
406 * Note that the returned IO or memory base is a physical address
407 */
408
409long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn)
410{
411 struct pci_controller *hose;
412 long result = -EOPNOTSUPP;
413
414 hose = pci_bus_to_hose(bus);
415 if (!hose)
416 return -ENODEV;
417
418 switch (which) {
419 case IOBASE_BRIDGE_NUMBER:
420 return (long)hose->first_busno;
421 case IOBASE_MEMORY:
422 return (long)hose->pci_mem_offset;
423 case IOBASE_IO:
424 return (long)hose->io_base_phys;
425 case IOBASE_ISA_IO:
426 return (long)isa_io_base;
427 case IOBASE_ISA_MEM:
428 return (long)isa_mem_base;
429 }
430
431 return result;
432}
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 653da62d0682..177cdaf83564 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -185,6 +185,7 @@ config MACH_JAZZ
185 select CSRC_R4K 185 select CSRC_R4K
186 select DEFAULT_SGI_PARTITION if CPU_BIG_ENDIAN 186 select DEFAULT_SGI_PARTITION if CPU_BIG_ENDIAN
187 select GENERIC_ISA_DMA 187 select GENERIC_ISA_DMA
188 select HAVE_PCSPKR_PLATFORM
188 select IRQ_CPU 189 select IRQ_CPU
189 select I8253 190 select I8253
190 select I8259 191 select I8259
@@ -266,6 +267,7 @@ config MIPS_MALTA
266 select CSRC_R4K 267 select CSRC_R4K
267 select DMA_NONCOHERENT 268 select DMA_NONCOHERENT
268 select GENERIC_ISA_DMA 269 select GENERIC_ISA_DMA
270 select HAVE_PCSPKR_PLATFORM
269 select IRQ_CPU 271 select IRQ_CPU
270 select IRQ_GIC 272 select IRQ_GIC
271 select HW_HAS_PCI 273 select HW_HAS_PCI
@@ -640,6 +642,7 @@ config SNI_RM
640 select DEFAULT_SGI_PARTITION if CPU_BIG_ENDIAN 642 select DEFAULT_SGI_PARTITION if CPU_BIG_ENDIAN
641 select DMA_NONCOHERENT 643 select DMA_NONCOHERENT
642 select GENERIC_ISA_DMA 644 select GENERIC_ISA_DMA
645 select HAVE_PCSPKR_PLATFORM
643 select HW_HAS_EISA 646 select HW_HAS_EISA
644 select HW_HAS_PCI 647 select HW_HAS_PCI
645 select IRQ_CPU 648 select IRQ_CPU
@@ -2388,6 +2391,7 @@ config MMU
2388config I8253 2391config I8253
2389 bool 2392 bool
2390 select CLKSRC_I8253 2393 select CLKSRC_I8253
2394 select CLKEVT_I8253
2391 select MIPS_EXTERNAL_TIMER 2395 select MIPS_EXTERNAL_TIMER
2392 2396
2393config ZONE_DMA32 2397config ZONE_DMA32
@@ -2489,20 +2493,4 @@ source "security/Kconfig"
2489 2493
2490source "crypto/Kconfig" 2494source "crypto/Kconfig"
2491 2495
2492menuconfig VIRTUALIZATION
2493 bool "Virtualization"
2494 default n
2495 ---help---
2496 Say Y here to get to see options for using your Linux host to run other
2497 operating systems inside virtual machines (guests).
2498 This option alone does not add any kernel code.
2499
2500 If you say N, all options in this submenu will be skipped and disabled.
2501
2502if VIRTUALIZATION
2503
2504source drivers/virtio/Kconfig
2505
2506endif # VIRTUALIZATION
2507
2508source "lib/Kconfig" 2496source "lib/Kconfig"
diff --git a/arch/mips/cobalt/time.c b/arch/mips/cobalt/time.c
index 0162f9edc693..3bff3b820baf 100644
--- a/arch/mips/cobalt/time.c
+++ b/arch/mips/cobalt/time.c
@@ -17,10 +17,10 @@
17 * along with this program; if not, write to the Free Software 17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 */ 19 */
20#include <linux/i8253.h>
20#include <linux/init.h> 21#include <linux/init.h>
21 22
22#include <asm/gt64120.h> 23#include <asm/gt64120.h>
23#include <asm/i8253.h>
24#include <asm/time.h> 24#include <asm/time.h>
25 25
26#define GT641XX_BASE_CLOCK 50000000 /* 50MHz */ 26#define GT641XX_BASE_CLOCK 50000000 /* 50MHz */
diff --git a/arch/mips/include/asm/i8253.h b/arch/mips/include/asm/i8253.h
deleted file mode 100644
index 9ad011366f73..000000000000
--- a/arch/mips/include/asm/i8253.h
+++ /dev/null
@@ -1,24 +0,0 @@
1/*
2 * Machine specific IO port address definition for generic.
3 * Written by Osamu Tomita <tomita@cinet.co.jp>
4 */
5#ifndef __ASM_I8253_H
6#define __ASM_I8253_H
7
8#include <linux/spinlock.h>
9
10/* i8253A PIT registers */
11#define PIT_MODE 0x43
12#define PIT_CH0 0x40
13#define PIT_CH2 0x42
14
15#define PIT_LATCH LATCH
16
17extern raw_spinlock_t i8253_lock;
18
19extern void setup_pit_timer(void);
20
21#define inb_pit inb_p
22#define outb_pit outb_p
23
24#endif /* __ASM_I8253_H */
diff --git a/arch/mips/include/asm/stacktrace.h b/arch/mips/include/asm/stacktrace.h
index 0bf82818aa53..780ee2c2a2ac 100644
--- a/arch/mips/include/asm/stacktrace.h
+++ b/arch/mips/include/asm/stacktrace.h
@@ -7,6 +7,10 @@
7extern int raw_show_trace; 7extern int raw_show_trace;
8extern unsigned long unwind_stack(struct task_struct *task, unsigned long *sp, 8extern unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
9 unsigned long pc, unsigned long *ra); 9 unsigned long pc, unsigned long *ra);
10extern unsigned long unwind_stack_by_address(unsigned long stack_page,
11 unsigned long *sp,
12 unsigned long pc,
13 unsigned long *ra);
10#else 14#else
11#define raw_show_trace 1 15#define raw_show_trace 1
12static inline unsigned long unwind_stack(struct task_struct *task, 16static inline unsigned long unwind_stack(struct task_struct *task,
diff --git a/arch/mips/jazz/irq.c b/arch/mips/jazz/irq.c
index 260df4750949..ca9bd2069142 100644
--- a/arch/mips/jazz/irq.c
+++ b/arch/mips/jazz/irq.c
@@ -7,6 +7,7 @@
7 * Copyright (C) 1994 - 2001, 2003, 07 Ralf Baechle 7 * Copyright (C) 1994 - 2001, 2003, 07 Ralf Baechle
8 */ 8 */
9#include <linux/clockchips.h> 9#include <linux/clockchips.h>
10#include <linux/i8253.h>
10#include <linux/init.h> 11#include <linux/init.h>
11#include <linux/interrupt.h> 12#include <linux/interrupt.h>
12#include <linux/kernel.h> 13#include <linux/kernel.h>
@@ -15,7 +16,6 @@
15#include <linux/irq.h> 16#include <linux/irq.h>
16 17
17#include <asm/irq_cpu.h> 18#include <asm/irq_cpu.h>
18#include <asm/i8253.h>
19#include <asm/i8259.h> 19#include <asm/i8259.h>
20#include <asm/io.h> 20#include <asm/io.h>
21#include <asm/jazz.h> 21#include <asm/jazz.h>
diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c
index 391221b6a6aa..be4ee7d63e04 100644
--- a/arch/mips/kernel/i8253.c
+++ b/arch/mips/kernel/i8253.c
@@ -3,96 +3,16 @@
3 * 3 *
4 */ 4 */
5#include <linux/clockchips.h> 5#include <linux/clockchips.h>
6#include <linux/init.h> 6#include <linux/i8253.h>
7#include <linux/interrupt.h>
8#include <linux/jiffies.h>
9#include <linux/module.h> 7#include <linux/module.h>
10#include <linux/smp.h> 8#include <linux/smp.h>
11#include <linux/spinlock.h>
12#include <linux/irq.h> 9#include <linux/irq.h>
13 10
14#include <asm/delay.h>
15#include <asm/i8253.h>
16#include <asm/io.h>
17#include <asm/time.h> 11#include <asm/time.h>
18 12
19DEFINE_RAW_SPINLOCK(i8253_lock);
20EXPORT_SYMBOL(i8253_lock);
21
22/*
23 * Initialize the PIT timer.
24 *
25 * This is also called after resume to bring the PIT into operation again.
26 */
27static void init_pit_timer(enum clock_event_mode mode,
28 struct clock_event_device *evt)
29{
30 raw_spin_lock(&i8253_lock);
31
32 switch(mode) {
33 case CLOCK_EVT_MODE_PERIODIC:
34 /* binary, mode 2, LSB/MSB, ch 0 */
35 outb_p(0x34, PIT_MODE);
36 outb_p(LATCH & 0xff , PIT_CH0); /* LSB */
37 outb(LATCH >> 8 , PIT_CH0); /* MSB */
38 break;
39
40 case CLOCK_EVT_MODE_SHUTDOWN:
41 case CLOCK_EVT_MODE_UNUSED:
42 if (evt->mode == CLOCK_EVT_MODE_PERIODIC ||
43 evt->mode == CLOCK_EVT_MODE_ONESHOT) {
44 outb_p(0x30, PIT_MODE);
45 outb_p(0, PIT_CH0);
46 outb_p(0, PIT_CH0);
47 }
48 break;
49
50 case CLOCK_EVT_MODE_ONESHOT:
51 /* One shot setup */
52 outb_p(0x38, PIT_MODE);
53 break;
54
55 case CLOCK_EVT_MODE_RESUME:
56 /* Nothing to do here */
57 break;
58 }
59 raw_spin_unlock(&i8253_lock);
60}
61
62/*
63 * Program the next event in oneshot mode
64 *
65 * Delta is given in PIT ticks
66 */
67static int pit_next_event(unsigned long delta, struct clock_event_device *evt)
68{
69 raw_spin_lock(&i8253_lock);
70 outb_p(delta & 0xff , PIT_CH0); /* LSB */
71 outb(delta >> 8 , PIT_CH0); /* MSB */
72 raw_spin_unlock(&i8253_lock);
73
74 return 0;
75}
76
77/*
78 * On UP the PIT can serve all of the possible timer functions. On SMP systems
79 * it can be solely used for the global tick.
80 *
81 * The profiling and update capabilites are switched off once the local apic is
82 * registered. This mechanism replaces the previous #ifdef LOCAL_APIC -
83 * !using_apic_timer decisions in do_timer_interrupt_hook()
84 */
85static struct clock_event_device pit_clockevent = {
86 .name = "pit",
87 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
88 .set_mode = init_pit_timer,
89 .set_next_event = pit_next_event,
90 .irq = 0,
91};
92
93static irqreturn_t timer_interrupt(int irq, void *dev_id) 13static irqreturn_t timer_interrupt(int irq, void *dev_id)
94{ 14{
95 pit_clockevent.event_handler(&pit_clockevent); 15 i8253_clockevent.event_handler(&i8253_clockevent);
96 16
97 return IRQ_HANDLED; 17 return IRQ_HANDLED;
98} 18}
@@ -103,25 +23,9 @@ static struct irqaction irq0 = {
103 .name = "timer" 23 .name = "timer"
104}; 24};
105 25
106/*
107 * Initialize the conversion factor and the min/max deltas of the clock event
108 * structure and register the clock event source with the framework.
109 */
110void __init setup_pit_timer(void) 26void __init setup_pit_timer(void)
111{ 27{
112 struct clock_event_device *cd = &pit_clockevent; 28 clockevent_i8253_init(true);
113 unsigned int cpu = smp_processor_id();
114
115 /*
116 * Start pit with the boot cpu mask and make it global after the
117 * IO_APIC has been initialized.
118 */
119 cd->cpumask = cpumask_of(cpu);
120 clockevent_set_clock(cd, CLOCK_TICK_RATE);
121 cd->max_delta_ns = clockevent_delta2ns(0x7FFF, cd);
122 cd->min_delta_ns = clockevent_delta2ns(0xF, cd);
123 clockevents_register_device(cd);
124
125 setup_irq(0, &irq0); 29 setup_irq(0, &irq0);
126} 30}
127 31
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
index c018696765d4..5c74eb797f08 100644
--- a/arch/mips/kernel/i8259.c
+++ b/arch/mips/kernel/i8259.c
@@ -14,7 +14,7 @@
14#include <linux/interrupt.h> 14#include <linux/interrupt.h>
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/spinlock.h> 16#include <linux/spinlock.h>
17#include <linux/sysdev.h> 17#include <linux/syscore_ops.h>
18#include <linux/irq.h> 18#include <linux/irq.h>
19 19
20#include <asm/i8259.h> 20#include <asm/i8259.h>
@@ -215,14 +215,13 @@ spurious_8259A_irq:
215 } 215 }
216} 216}
217 217
218static int i8259A_resume(struct sys_device *dev) 218static void i8259A_resume(void)
219{ 219{
220 if (i8259A_auto_eoi >= 0) 220 if (i8259A_auto_eoi >= 0)
221 init_8259A(i8259A_auto_eoi); 221 init_8259A(i8259A_auto_eoi);
222 return 0;
223} 222}
224 223
225static int i8259A_shutdown(struct sys_device *dev) 224static void i8259A_shutdown(void)
226{ 225{
227 /* Put the i8259A into a quiescent state that 226 /* Put the i8259A into a quiescent state that
228 * the kernel initialization code can get it 227 * the kernel initialization code can get it
@@ -232,26 +231,17 @@ static int i8259A_shutdown(struct sys_device *dev)
232 outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */ 231 outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */
233 outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-1 */ 232 outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-1 */
234 } 233 }
235 return 0;
236} 234}
237 235
238static struct sysdev_class i8259_sysdev_class = { 236static struct syscore_ops i8259_syscore_ops = {
239 .name = "i8259",
240 .resume = i8259A_resume, 237 .resume = i8259A_resume,
241 .shutdown = i8259A_shutdown, 238 .shutdown = i8259A_shutdown,
242}; 239};
243 240
244static struct sys_device device_i8259A = {
245 .id = 0,
246 .cls = &i8259_sysdev_class,
247};
248
249static int __init i8259A_init_sysfs(void) 241static int __init i8259A_init_sysfs(void)
250{ 242{
251 int error = sysdev_class_register(&i8259_sysdev_class); 243 register_syscore_ops(&i8259_syscore_ops);
252 if (!error) 244 return 0;
253 error = sysdev_register(&device_i8259A);
254 return error;
255} 245}
256 246
257device_initcall(i8259A_init_sysfs); 247device_initcall(i8259A_init_sysfs);
diff --git a/arch/mips/kernel/perf_event.c b/arch/mips/kernel/perf_event.c
index a8244854d3dc..d0deaab9ace2 100644
--- a/arch/mips/kernel/perf_event.c
+++ b/arch/mips/kernel/perf_event.c
@@ -527,7 +527,7 @@ handle_associated_event(struct cpu_hw_events *cpuc,
527 if (!mipspmu_event_set_period(event, hwc, idx)) 527 if (!mipspmu_event_set_period(event, hwc, idx))
528 return; 528 return;
529 529
530 if (perf_event_overflow(event, 0, data, regs)) 530 if (perf_event_overflow(event, data, regs))
531 mipspmu->disable_event(idx); 531 mipspmu->disable_event(idx);
532} 532}
533 533
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index 75266ff4cc33..e5ad09a9baf7 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -377,6 +377,20 @@ static const struct mips_perf_event mipsxxcore_cache_map
377 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, 377 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
378 }, 378 },
379}, 379},
380[C(NODE)] = {
381 [C(OP_READ)] = {
382 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
383 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
384 },
385 [C(OP_WRITE)] = {
386 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
387 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
388 },
389 [C(OP_PREFETCH)] = {
390 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
391 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
392 },
393},
380}; 394};
381 395
382/* 74K core has completely different cache event map. */ 396/* 74K core has completely different cache event map. */
@@ -480,6 +494,20 @@ static const struct mips_perf_event mipsxx74Kcore_cache_map
480 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, 494 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
481 }, 495 },
482}, 496},
497[C(NODE)] = {
498 [C(OP_READ)] = {
499 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
500 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
501 },
502 [C(OP_WRITE)] = {
503 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
504 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
505 },
506 [C(OP_PREFETCH)] = {
507 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
508 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
509 },
510},
483}; 511};
484 512
485#ifdef CONFIG_MIPS_MT_SMP 513#ifdef CONFIG_MIPS_MT_SMP
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index d2112d3cf115..c28fbe6107bc 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -373,18 +373,18 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
373 373
374 374
375#ifdef CONFIG_KALLSYMS 375#ifdef CONFIG_KALLSYMS
376/* used by show_backtrace() */ 376/* generic stack unwinding function */
377unsigned long unwind_stack(struct task_struct *task, unsigned long *sp, 377unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
378 unsigned long pc, unsigned long *ra) 378 unsigned long *sp,
379 unsigned long pc,
380 unsigned long *ra)
379{ 381{
380 unsigned long stack_page;
381 struct mips_frame_info info; 382 struct mips_frame_info info;
382 unsigned long size, ofs; 383 unsigned long size, ofs;
383 int leaf; 384 int leaf;
384 extern void ret_from_irq(void); 385 extern void ret_from_irq(void);
385 extern void ret_from_exception(void); 386 extern void ret_from_exception(void);
386 387
387 stack_page = (unsigned long)task_stack_page(task);
388 if (!stack_page) 388 if (!stack_page)
389 return 0; 389 return 0;
390 390
@@ -443,6 +443,15 @@ unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
443 *ra = 0; 443 *ra = 0;
444 return __kernel_text_address(pc) ? pc : 0; 444 return __kernel_text_address(pc) ? pc : 0;
445} 445}
446EXPORT_SYMBOL(unwind_stack_by_address);
447
448/* used by show_backtrace() */
449unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
450 unsigned long pc, unsigned long *ra)
451{
452 unsigned long stack_page = (unsigned long)task_stack_page(task);
453 return unwind_stack_by_address(stack_page, sp, pc, ra);
454}
446#endif 455#endif
447 456
448/* 457/*
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index e9b3af27d844..b7517e3abc85 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -578,12 +578,12 @@ static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
578{ 578{
579 if ((opcode & OPCODE) == LL) { 579 if ((opcode & OPCODE) == LL) {
580 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 580 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
581 1, 0, regs, 0); 581 1, regs, 0);
582 return simulate_ll(regs, opcode); 582 return simulate_ll(regs, opcode);
583 } 583 }
584 if ((opcode & OPCODE) == SC) { 584 if ((opcode & OPCODE) == SC) {
585 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 585 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
586 1, 0, regs, 0); 586 1, regs, 0);
587 return simulate_sc(regs, opcode); 587 return simulate_sc(regs, opcode);
588 } 588 }
589 589
@@ -602,7 +602,7 @@ static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode)
602 int rd = (opcode & RD) >> 11; 602 int rd = (opcode & RD) >> 11;
603 int rt = (opcode & RT) >> 16; 603 int rt = (opcode & RT) >> 16;
604 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 604 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
605 1, 0, regs, 0); 605 1, regs, 0);
606 switch (rd) { 606 switch (rd) {
607 case 0: /* CPU number */ 607 case 0: /* CPU number */
608 regs->regs[rt] = smp_processor_id(); 608 regs->regs[rt] = smp_processor_id();
@@ -640,7 +640,7 @@ static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
640{ 640{
641 if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) { 641 if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
642 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 642 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
643 1, 0, regs, 0); 643 1, regs, 0);
644 return 0; 644 return 0;
645 } 645 }
646 646
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index cfea1adfa153..eb319b580353 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -111,8 +111,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
111 unsigned long value; 111 unsigned long value;
112 unsigned int res; 112 unsigned int res;
113 113
114 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 114 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
115 1, 0, regs, 0);
116 115
117 /* 116 /*
118 * This load never faults. 117 * This load never faults.
@@ -517,7 +516,7 @@ asmlinkage void do_ade(struct pt_regs *regs)
517 mm_segment_t seg; 516 mm_segment_t seg;
518 517
519 perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 518 perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
520 1, 0, regs, regs->cp0_badvaddr); 519 1, regs, regs->cp0_badvaddr);
521 /* 520 /*
522 * Did we catch a fault trying to load an instruction? 521 * Did we catch a fault trying to load an instruction?
523 * Or are we running in MIPS16 mode? 522 * Or are we running in MIPS16 mode?
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index d32cb0503110..dbf2f93a5091 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -272,8 +272,7 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
272 } 272 }
273 273
274 emul: 274 emul:
275 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 275 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, xcp, 0);
276 1, 0, xcp, 0);
277 MIPS_FPU_EMU_INC_STATS(emulated); 276 MIPS_FPU_EMU_INC_STATS(emulated);
278 switch (MIPSInst_OPCODE(ir)) { 277 switch (MIPSInst_OPCODE(ir)) {
279 case ldc1_op:{ 278 case ldc1_op:{
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 137ee76a0045..937cf3368164 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -145,7 +145,7 @@ good_area:
145 * the fault. 145 * the fault.
146 */ 146 */
147 fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0); 147 fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
148 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); 148 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
149 if (unlikely(fault & VM_FAULT_ERROR)) { 149 if (unlikely(fault & VM_FAULT_ERROR)) {
150 if (fault & VM_FAULT_OOM) 150 if (fault & VM_FAULT_OOM)
151 goto out_of_memory; 151 goto out_of_memory;
@@ -154,12 +154,10 @@ good_area:
154 BUG(); 154 BUG();
155 } 155 }
156 if (fault & VM_FAULT_MAJOR) { 156 if (fault & VM_FAULT_MAJOR) {
157 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 157 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
158 1, 0, regs, address);
159 tsk->maj_flt++; 158 tsk->maj_flt++;
160 } else { 159 } else {
161 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 160 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
162 1, 0, regs, address);
163 tsk->min_flt++; 161 tsk->min_flt++;
164 } 162 }
165 163
diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c
index 1620b83cd13e..f8ee945ee411 100644
--- a/arch/mips/mti-malta/malta-time.c
+++ b/arch/mips/mti-malta/malta-time.c
@@ -19,6 +19,7 @@
19 */ 19 */
20 20
21#include <linux/types.h> 21#include <linux/types.h>
22#include <linux/i8253.h>
22#include <linux/init.h> 23#include <linux/init.h>
23#include <linux/kernel_stat.h> 24#include <linux/kernel_stat.h>
24#include <linux/sched.h> 25#include <linux/sched.h>
@@ -31,7 +32,6 @@
31#include <asm/mipsregs.h> 32#include <asm/mipsregs.h>
32#include <asm/mipsmtregs.h> 33#include <asm/mipsmtregs.h>
33#include <asm/hardirq.h> 34#include <asm/hardirq.h>
34#include <asm/i8253.h>
35#include <asm/irq.h> 35#include <asm/irq.h>
36#include <asm/div64.h> 36#include <asm/div64.h>
37#include <asm/cpu.h> 37#include <asm/cpu.h>
diff --git a/arch/mips/oprofile/Makefile b/arch/mips/oprofile/Makefile
index 4b9d7044e26c..29f2f13eb31c 100644
--- a/arch/mips/oprofile/Makefile
+++ b/arch/mips/oprofile/Makefile
@@ -8,7 +8,7 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
8 oprofilefs.o oprofile_stats.o \ 8 oprofilefs.o oprofile_stats.o \
9 timer_int.o ) 9 timer_int.o )
10 10
11oprofile-y := $(DRIVER_OBJS) common.o 11oprofile-y := $(DRIVER_OBJS) common.o backtrace.o
12 12
13oprofile-$(CONFIG_CPU_MIPS32) += op_model_mipsxx.o 13oprofile-$(CONFIG_CPU_MIPS32) += op_model_mipsxx.o
14oprofile-$(CONFIG_CPU_MIPS64) += op_model_mipsxx.o 14oprofile-$(CONFIG_CPU_MIPS64) += op_model_mipsxx.o
diff --git a/arch/mips/oprofile/backtrace.c b/arch/mips/oprofile/backtrace.c
new file mode 100644
index 000000000000..6854ed5097d2
--- /dev/null
+++ b/arch/mips/oprofile/backtrace.c
@@ -0,0 +1,175 @@
1#include <linux/oprofile.h>
2#include <linux/sched.h>
3#include <linux/mm.h>
4#include <linux/uaccess.h>
5#include <asm/ptrace.h>
6#include <asm/stacktrace.h>
7#include <linux/stacktrace.h>
8#include <linux/kernel.h>
9#include <asm/sections.h>
10#include <asm/inst.h>
11
/*
 * Minimal unwind state for one frame: the stack pointer, the program
 * counter, and the return-address register ($31) value.
 */
struct stackframe {
	unsigned long sp;
	unsigned long pc;
	unsigned long ra;
};
17
18static inline int get_mem(unsigned long addr, unsigned long *result)
19{
20 unsigned long *address = (unsigned long *) addr;
21 if (!access_ok(VERIFY_READ, addr, sizeof(unsigned long)))
22 return -1;
23 if (__copy_from_user_inatomic(result, address, sizeof(unsigned long)))
24 return -3;
25 return 0;
26}
27
28/*
29 * These two instruction helpers were taken from process.c
30 */
31static inline int is_ra_save_ins(union mips_instruction *ip)
32{
33 /* sw / sd $ra, offset($sp) */
34 return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op)
35 && ip->i_format.rs == 29 && ip->i_format.rt == 31;
36}
37
38static inline int is_sp_move_ins(union mips_instruction *ip)
39{
40 /* addiu/daddiu sp,sp,-imm */
41 if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
42 return 0;
43 if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op)
44 return 1;
45 return 0;
46}
47
48/*
49 * Looks for specific instructions that mark the end of a function.
50 * This usually means we ran into the code area of the previous function.
51 */
52static inline int is_end_of_function_marker(union mips_instruction *ip)
53{
54 /* jr ra */
55 if (ip->r_format.func == jr_op && ip->r_format.rs == 31)
56 return 1;
57 /* lui gp */
58 if (ip->i_format.opcode == lui_op && ip->i_format.rt == 28)
59 return 1;
60 return 0;
61}
62
63/*
64 * TODO for userspace stack unwinding:
65 * - handle cases where the stack is adjusted inside a function
66 * (generally doesn't happen)
67 * - find optimal value for max_instr_check
68 * - try to find a way to handle leaf functions
69 */
70
71static inline int unwind_user_frame(struct stackframe *old_frame,
72 const unsigned int max_instr_check)
73{
74 struct stackframe new_frame = *old_frame;
75 off_t ra_offset = 0;
76 size_t stack_size = 0;
77 unsigned long addr;
78
79 if (old_frame->pc == 0 || old_frame->sp == 0 || old_frame->ra == 0)
80 return -9;
81
82 for (addr = new_frame.pc; (addr + max_instr_check > new_frame.pc)
83 && (!ra_offset || !stack_size); --addr) {
84 union mips_instruction ip;
85
86 if (get_mem(addr, (unsigned long *) &ip))
87 return -11;
88
89 if (is_sp_move_ins(&ip)) {
90 int stack_adjustment = ip.i_format.simmediate;
91 if (stack_adjustment > 0)
92 /* This marks the end of the previous function,
93 which means we overran. */
94 break;
95 stack_size = (unsigned) stack_adjustment;
96 } else if (is_ra_save_ins(&ip)) {
97 int ra_slot = ip.i_format.simmediate;
98 if (ra_slot < 0)
99 /* This shouldn't happen. */
100 break;
101 ra_offset = ra_slot;
102 } else if (is_end_of_function_marker(&ip))
103 break;
104 }
105
106 if (!ra_offset || !stack_size)
107 return -1;
108
109 if (ra_offset) {
110 new_frame.ra = old_frame->sp + ra_offset;
111 if (get_mem(new_frame.ra, &(new_frame.ra)))
112 return -13;
113 }
114
115 if (stack_size) {
116 new_frame.sp = old_frame->sp + stack_size;
117 if (get_mem(new_frame.sp, &(new_frame.sp)))
118 return -14;
119 }
120
121 if (new_frame.sp > old_frame->sp)
122 return -2;
123
124 new_frame.pc = old_frame->ra;
125 *old_frame = new_frame;
126
127 return 0;
128}
129
130static inline void do_user_backtrace(unsigned long low_addr,
131 struct stackframe *frame,
132 unsigned int depth)
133{
134 const unsigned int max_instr_check = 512;
135 const unsigned long high_addr = low_addr + THREAD_SIZE;
136
137 while (depth-- && !unwind_user_frame(frame, max_instr_check)) {
138 oprofile_add_trace(frame->ra);
139 if (frame->sp < low_addr || frame->sp > high_addr)
140 break;
141 }
142}
143
#ifndef CONFIG_KALLSYMS
/* Without kallsyms there is no kernel unwinder to drive. */
static inline void do_kernel_backtrace(unsigned long low_addr,
				       struct stackframe *frame,
				       unsigned int depth) { }
#else
/*
 * Walk at most 'depth' kernel frames using the generic MIPS unwinder,
 * reporting each return address to oprofile.
 */
static inline void do_kernel_backtrace(unsigned long low_addr,
				       struct stackframe *frame,
				       unsigned int depth)
{
	while (depth != 0 && frame->pc != 0) {
		depth--;
		frame->pc = unwind_stack_by_address(low_addr,
						    &(frame->sp),
						    frame->pc,
						    &(frame->ra));
		oprofile_add_trace(frame->ra);
	}
}
#endif
162
/*
 * oprofile backtrace callback: seed an unwind state from the trapped
 * registers ($29 = sp, $31 = ra, EPC = pc) and walk either the user or
 * the kernel stack depending on where the sample hit.
 */
void notrace op_mips_backtrace(struct pt_regs *const regs, unsigned int depth)
{
	struct stackframe frame = { .sp = regs->regs[29],
				    .pc = regs->cp0_epc,
				    .ra = regs->regs[31] };
	const int userspace = user_mode(regs);
	/*
	 * NOTE(review): ALIGN() rounds *up*, so unless sp is already
	 * THREAD_SIZE-aligned, low_addr lands above frame.sp — confirm
	 * this is the intended base for the stack-window checks in the
	 * backtrace helpers.
	 */
	const unsigned long low_addr = ALIGN(frame.sp, THREAD_SIZE);

	if (userspace)
		do_user_backtrace(low_addr, &frame, depth);
	else
		do_kernel_backtrace(low_addr, &frame, depth);
}
diff --git a/arch/mips/oprofile/common.c b/arch/mips/oprofile/common.c
index f9eb1aba6345..d1f2d4c52d42 100644
--- a/arch/mips/oprofile/common.c
+++ b/arch/mips/oprofile/common.c
@@ -115,6 +115,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
115 ops->start = op_mips_start; 115 ops->start = op_mips_start;
116 ops->stop = op_mips_stop; 116 ops->stop = op_mips_stop;
117 ops->cpu_type = lmodel->cpu_type; 117 ops->cpu_type = lmodel->cpu_type;
118 ops->backtrace = op_mips_backtrace;
118 119
119 printk(KERN_INFO "oprofile: using %s performance monitoring.\n", 120 printk(KERN_INFO "oprofile: using %s performance monitoring.\n",
120 lmodel->cpu_type); 121 lmodel->cpu_type);
diff --git a/arch/mips/oprofile/op_impl.h b/arch/mips/oprofile/op_impl.h
index f04b54fb37d1..7c2da27ece04 100644
--- a/arch/mips/oprofile/op_impl.h
+++ b/arch/mips/oprofile/op_impl.h
@@ -36,4 +36,6 @@ struct op_mips_model {
36 unsigned char num_counters; 36 unsigned char num_counters;
37}; 37};
38 38
39void op_mips_backtrace(struct pt_regs * const regs, unsigned int depth);
40
39#endif 41#endif
diff --git a/arch/mips/sgi-ip22/ip22-time.c b/arch/mips/sgi-ip22/ip22-time.c
index 1a94c9894188..607192449335 100644
--- a/arch/mips/sgi-ip22/ip22-time.c
+++ b/arch/mips/sgi-ip22/ip22-time.c
@@ -10,6 +10,7 @@
10 * Copyright (C) 2003, 06 Ralf Baechle (ralf@linux-mips.org) 10 * Copyright (C) 2003, 06 Ralf Baechle (ralf@linux-mips.org)
11 */ 11 */
12#include <linux/bcd.h> 12#include <linux/bcd.h>
13#include <linux/i8253.h>
13#include <linux/init.h> 14#include <linux/init.h>
14#include <linux/irq.h> 15#include <linux/irq.h>
15#include <linux/kernel.h> 16#include <linux/kernel.h>
@@ -20,7 +21,6 @@
20 21
21#include <asm/cpu.h> 22#include <asm/cpu.h>
22#include <asm/mipsregs.h> 23#include <asm/mipsregs.h>
23#include <asm/i8253.h>
24#include <asm/io.h> 24#include <asm/io.h>
25#include <asm/irq.h> 25#include <asm/irq.h>
26#include <asm/time.h> 26#include <asm/time.h>
diff --git a/arch/mips/sni/time.c b/arch/mips/sni/time.c
index 0904d4d30cb3..ec0be14996a4 100644
--- a/arch/mips/sni/time.c
+++ b/arch/mips/sni/time.c
@@ -1,11 +1,11 @@
1#include <linux/types.h> 1#include <linux/types.h>
2#include <linux/i8253.h>
2#include <linux/interrupt.h> 3#include <linux/interrupt.h>
3#include <linux/irq.h> 4#include <linux/irq.h>
4#include <linux/smp.h> 5#include <linux/smp.h>
5#include <linux/time.h> 6#include <linux/time.h>
6#include <linux/clockchips.h> 7#include <linux/clockchips.h>
7 8
8#include <asm/i8253.h>
9#include <asm/sni.h> 9#include <asm/sni.h>
10#include <asm/time.h> 10#include <asm/time.h>
11#include <asm-generic/rtc.h> 11#include <asm-generic/rtc.h>
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 2729c6663d8a..cdf7a0a64406 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -134,6 +134,7 @@ config PPC
134 select GENERIC_IRQ_SHOW_LEVEL 134 select GENERIC_IRQ_SHOW_LEVEL
135 select HAVE_RCU_TABLE_FREE if SMP 135 select HAVE_RCU_TABLE_FREE if SMP
136 select HAVE_SYSCALL_TRACEPOINTS 136 select HAVE_SYSCALL_TRACEPOINTS
137 select HAVE_BPF_JIT if (PPC64 && NET)
137 138
138config EARLY_PRINTK 139config EARLY_PRINTK
139 bool 140 bool
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index b7212b619c52..b94740f36b1a 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -154,7 +154,8 @@ core-y += arch/powerpc/kernel/ \
154 arch/powerpc/lib/ \ 154 arch/powerpc/lib/ \
155 arch/powerpc/sysdev/ \ 155 arch/powerpc/sysdev/ \
156 arch/powerpc/platforms/ \ 156 arch/powerpc/platforms/ \
157 arch/powerpc/math-emu/ 157 arch/powerpc/math-emu/ \
158 arch/powerpc/net/
158core-$(CONFIG_XMON) += arch/powerpc/xmon/ 159core-$(CONFIG_XMON) += arch/powerpc/xmon/
159core-$(CONFIG_KVM) += arch/powerpc/kvm/ 160core-$(CONFIG_KVM) += arch/powerpc/kvm/
160 161
diff --git a/arch/powerpc/include/asm/8253pit.h b/arch/powerpc/include/asm/8253pit.h
deleted file mode 100644
index a71c9c1455a7..000000000000
--- a/arch/powerpc/include/asm/8253pit.h
+++ /dev/null
@@ -1,3 +0,0 @@
1/*
2 * 8253/8254 Programmable Interval Timer
3 */
diff --git a/arch/powerpc/include/asm/emulated_ops.h b/arch/powerpc/include/asm/emulated_ops.h
index 45921672b97a..2cc41c715d2b 100644
--- a/arch/powerpc/include/asm/emulated_ops.h
+++ b/arch/powerpc/include/asm/emulated_ops.h
@@ -78,14 +78,14 @@ extern void ppc_warn_emulated_print(const char *type);
78#define PPC_WARN_EMULATED(type, regs) \ 78#define PPC_WARN_EMULATED(type, regs) \
79 do { \ 79 do { \
80 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, \ 80 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, \
81 1, 0, regs, 0); \ 81 1, regs, 0); \
82 __PPC_WARN_EMULATED(type); \ 82 __PPC_WARN_EMULATED(type); \
83 } while (0) 83 } while (0)
84 84
85#define PPC_WARN_ALIGNMENT(type, regs) \ 85#define PPC_WARN_ALIGNMENT(type, regs) \
86 do { \ 86 do { \
87 perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, \ 87 perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, \
88 1, 0, regs, regs->dar); \ 88 1, regs, regs->dar); \
89 __PPC_WARN_EMULATED(type); \ 89 __PPC_WARN_EMULATED(type); \
90 } while (0) 90 } while (0)
91 91
diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h
index 1c33ec17ca36..80fd4d2b4a62 100644
--- a/arch/powerpc/include/asm/hw_breakpoint.h
+++ b/arch/powerpc/include/asm/hw_breakpoint.h
@@ -57,7 +57,7 @@ void hw_breakpoint_pmu_read(struct perf_event *bp);
57extern void flush_ptrace_hw_breakpoint(struct task_struct *tsk); 57extern void flush_ptrace_hw_breakpoint(struct task_struct *tsk);
58 58
59extern struct pmu perf_ops_bp; 59extern struct pmu perf_ops_bp;
60extern void ptrace_triggered(struct perf_event *bp, int nmi, 60extern void ptrace_triggered(struct perf_event *bp,
61 struct perf_sample_data *data, struct pt_regs *regs); 61 struct perf_sample_data *data, struct pt_regs *regs);
62static inline void hw_breakpoint_disable(void) 62static inline void hw_breakpoint_disable(void)
63{ 63{
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index b90dbf8e5cd9..90bd3ed48165 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -171,15 +171,9 @@ static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus)
171 171
172#ifndef CONFIG_PPC64 172#ifndef CONFIG_PPC64
173 173
174static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus) 174extern int pci_device_from_OF_node(struct device_node *node,
175{ 175 u8 *bus, u8 *devfn);
176 struct pci_controller *host; 176extern void pci_create_OF_bus_map(void);
177
178 if (bus->self)
179 return pci_device_to_OF_node(bus->self);
180 host = pci_bus_to_host(bus);
181 return host ? host->dn : NULL;
182}
183 177
184static inline int isa_vaddr_is_ioport(void __iomem *address) 178static inline int isa_vaddr_is_ioport(void __iomem *address)
185{ 179{
@@ -223,17 +217,8 @@ struct pci_dn {
223/* Get the pointer to a device_node's pci_dn */ 217/* Get the pointer to a device_node's pci_dn */
224#define PCI_DN(dn) ((struct pci_dn *) (dn)->data) 218#define PCI_DN(dn) ((struct pci_dn *) (dn)->data)
225 219
226extern struct device_node *fetch_dev_dn(struct pci_dev *dev);
227extern void * update_dn_pci_info(struct device_node *dn, void *data); 220extern void * update_dn_pci_info(struct device_node *dn, void *data);
228 221
229/* Get a device_node from a pci_dev. This code must be fast except
230 * in the case where the sysdata is incorrect and needs to be fixed
231 * up (this will only happen once). */
232static inline struct device_node *pci_device_to_OF_node(struct pci_dev *dev)
233{
234 return dev->dev.of_node ? dev->dev.of_node : fetch_dev_dn(dev);
235}
236
237static inline int pci_device_from_OF_node(struct device_node *np, 222static inline int pci_device_from_OF_node(struct device_node *np,
238 u8 *bus, u8 *devfn) 223 u8 *bus, u8 *devfn)
239{ 224{
@@ -244,14 +229,6 @@ static inline int pci_device_from_OF_node(struct device_node *np,
244 return 0; 229 return 0;
245} 230}
246 231
247static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
248{
249 if (bus->self)
250 return pci_device_to_OF_node(bus->self);
251 else
252 return bus->dev.of_node; /* Must be root bus (PHB) */
253}
254
255/** Find the bus corresponding to the indicated device node */ 232/** Find the bus corresponding to the indicated device node */
256extern struct pci_bus *pcibios_find_pci_bus(struct device_node *dn); 233extern struct pci_bus *pcibios_find_pci_bus(struct device_node *dn);
257 234
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
index 7d7790954e02..1f522680ea17 100644
--- a/arch/powerpc/include/asm/pci.h
+++ b/arch/powerpc/include/asm/pci.h
@@ -179,8 +179,7 @@ extern int remove_phb_dynamic(struct pci_controller *phb);
179extern struct pci_dev *of_create_pci_dev(struct device_node *node, 179extern struct pci_dev *of_create_pci_dev(struct device_node *node,
180 struct pci_bus *bus, int devfn); 180 struct pci_bus *bus, int devfn);
181 181
182extern void of_scan_pci_bridge(struct device_node *node, 182extern void of_scan_pci_bridge(struct pci_dev *dev);
183 struct pci_dev *dev);
184 183
185extern void of_scan_bus(struct device_node *node, struct pci_bus *bus); 184extern void of_scan_bus(struct device_node *node, struct pci_bus *bus);
186extern void of_rescan_bus(struct device_node *node, struct pci_bus *bus); 185extern void of_rescan_bus(struct device_node *node, struct pci_bus *bus);
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index e472659d906c..e980faae4225 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -71,6 +71,42 @@
71#define PPC_INST_ERATSX 0x7c000126 71#define PPC_INST_ERATSX 0x7c000126
72#define PPC_INST_ERATSX_DOT 0x7c000127 72#define PPC_INST_ERATSX_DOT 0x7c000127
73 73
74/* Misc instructions for BPF compiler */
75#define PPC_INST_LD 0xe8000000
76#define PPC_INST_LHZ 0xa0000000
77#define PPC_INST_LWZ 0x80000000
78#define PPC_INST_STD 0xf8000000
79#define PPC_INST_STDU 0xf8000001
80#define PPC_INST_MFLR 0x7c0802a6
81#define PPC_INST_MTLR 0x7c0803a6
82#define PPC_INST_CMPWI 0x2c000000
83#define PPC_INST_CMPDI 0x2c200000
84#define PPC_INST_CMPLW 0x7c000040
85#define PPC_INST_CMPLWI 0x28000000
86#define PPC_INST_ADDI 0x38000000
87#define PPC_INST_ADDIS 0x3c000000
88#define PPC_INST_ADD 0x7c000214
89#define PPC_INST_SUB 0x7c000050
90#define PPC_INST_BLR 0x4e800020
91#define PPC_INST_BLRL 0x4e800021
92#define PPC_INST_MULLW 0x7c0001d6
93#define PPC_INST_MULHWU 0x7c000016
94#define PPC_INST_MULLI 0x1c000000
95#define PPC_INST_DIVWU 0x7c0003d6
96#define PPC_INST_RLWINM 0x54000000
97#define PPC_INST_RLDICR 0x78000004
98#define PPC_INST_SLW 0x7c000030
99#define PPC_INST_SRW 0x7c000430
100#define PPC_INST_AND 0x7c000038
101#define PPC_INST_ANDDOT 0x7c000039
102#define PPC_INST_OR 0x7c000378
103#define PPC_INST_ANDI 0x70000000
104#define PPC_INST_ORI 0x60000000
105#define PPC_INST_ORIS 0x64000000
106#define PPC_INST_NEG 0x7c0000d0
107#define PPC_INST_BRANCH 0x48000000
108#define PPC_INST_BRANCH_COND 0x40800000
109
74/* macros to insert fields into opcodes */ 110/* macros to insert fields into opcodes */
75#define __PPC_RA(a) (((a) & 0x1f) << 16) 111#define __PPC_RA(a) (((a) & 0x1f) << 16)
76#define __PPC_RB(b) (((b) & 0x1f) << 11) 112#define __PPC_RB(b) (((b) & 0x1f) << 11)
@@ -83,6 +119,10 @@
83#define __PPC_T_TLB(t) (((t) & 0x3) << 21) 119#define __PPC_T_TLB(t) (((t) & 0x3) << 21)
84#define __PPC_WC(w) (((w) & 0x3) << 21) 120#define __PPC_WC(w) (((w) & 0x3) << 21)
85#define __PPC_WS(w) (((w) & 0x1f) << 11) 121#define __PPC_WS(w) (((w) & 0x1f) << 11)
122#define __PPC_SH(s) __PPC_WS(s)
123#define __PPC_MB(s) (((s) & 0x1f) << 6)
124#define __PPC_ME(s) (((s) & 0x1f) << 1)
125#define __PPC_BI(s) (((s) & 0x1f) << 16)
86 126
87/* 127/*
88 * Only use the larx hint bit on 64bit CPUs. e500v1/v2 based CPUs will treat a 128 * Only use the larx hint bit on 64bit CPUs. e500v1/v2 based CPUs will treat a
diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h
index c189aa5fe1f4..b823536375dc 100644
--- a/arch/powerpc/include/asm/prom.h
+++ b/arch/powerpc/include/asm/prom.h
@@ -22,20 +22,6 @@
22 22
23#define HAVE_ARCH_DEVTREE_FIXUPS 23#define HAVE_ARCH_DEVTREE_FIXUPS
24 24
25#ifdef CONFIG_PPC32
26/*
27 * PCI <-> OF matching functions
28 * (XXX should these be here?)
29 */
30struct pci_bus;
31struct pci_dev;
32extern int pci_device_from_OF_node(struct device_node *node,
33 u8* bus, u8* devfn);
34extern struct device_node* pci_busdev_to_OF_node(struct pci_bus *, int);
35extern struct device_node* pci_device_to_OF_node(struct pci_dev *);
36extern void pci_create_OF_bus_map(void);
37#endif
38
39/* 25/*
40 * OF address retreival & translation 26 * OF address retreival & translation
41 */ 27 */
diff --git a/arch/powerpc/kernel/e500-pmu.c b/arch/powerpc/kernel/e500-pmu.c
index b150b510510f..cb2e2949c8d1 100644
--- a/arch/powerpc/kernel/e500-pmu.c
+++ b/arch/powerpc/kernel/e500-pmu.c
@@ -75,6 +75,11 @@ static int e500_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
75 [C(OP_WRITE)] = { -1, -1 }, 75 [C(OP_WRITE)] = { -1, -1 },
76 [C(OP_PREFETCH)] = { -1, -1 }, 76 [C(OP_PREFETCH)] = { -1, -1 },
77 }, 77 },
78 [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */
79 [C(OP_READ)] = { -1, -1 },
80 [C(OP_WRITE)] = { -1, -1 },
81 [C(OP_PREFETCH)] = { -1, -1 },
82 },
78}; 83};
79 84
80static int num_events = 128; 85static int num_events = 128;
diff --git a/arch/powerpc/kernel/mpc7450-pmu.c b/arch/powerpc/kernel/mpc7450-pmu.c
index 2cc5e0301d0b..845a58478890 100644
--- a/arch/powerpc/kernel/mpc7450-pmu.c
+++ b/arch/powerpc/kernel/mpc7450-pmu.c
@@ -388,6 +388,11 @@ static int mpc7450_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
388 [C(OP_WRITE)] = { -1, -1 }, 388 [C(OP_WRITE)] = { -1, -1 },
389 [C(OP_PREFETCH)] = { -1, -1 }, 389 [C(OP_PREFETCH)] = { -1, -1 },
390 }, 390 },
391 [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */
392 [C(OP_READ)] = { -1, -1 },
393 [C(OP_WRITE)] = { -1, -1 },
394 [C(OP_PREFETCH)] = { -1, -1 },
395 },
391}; 396};
392 397
393struct power_pmu mpc7450_pmu = { 398struct power_pmu mpc7450_pmu = {
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 893af2a9cd03..a3c92770e422 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -1097,9 +1097,6 @@ void __devinit pcibios_setup_bus_devices(struct pci_bus *bus)
1097 if (dev->is_added) 1097 if (dev->is_added)
1098 continue; 1098 continue;
1099 1099
1100 /* Setup OF node pointer in the device */
1101 dev->dev.of_node = pci_device_to_OF_node(dev);
1102
1103 /* Fixup NUMA node as it may not be setup yet by the generic 1100 /* Fixup NUMA node as it may not be setup yet by the generic
1104 * code and is needed by the DMA init 1101 * code and is needed by the DMA init
1105 */ 1102 */
@@ -1685,6 +1682,13 @@ int early_find_capability(struct pci_controller *hose, int bus, int devfn,
1685 return pci_bus_find_capability(fake_pci_bus(hose, bus), devfn, cap); 1682 return pci_bus_find_capability(fake_pci_bus(hose, bus), devfn, cap);
1686} 1683}
1687 1684
1685struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
1686{
1687 struct pci_controller *hose = bus->sysdata;
1688
1689 return of_node_get(hose->dn);
1690}
1691
1688/** 1692/**
1689 * pci_scan_phb - Given a pci_controller, setup and scan the PCI bus 1693 * pci_scan_phb - Given a pci_controller, setup and scan the PCI bus
1690 * @hose: Pointer to the PCI host controller instance structure 1694 * @hose: Pointer to the PCI host controller instance structure
@@ -1705,7 +1709,6 @@ void __devinit pcibios_scan_phb(struct pci_controller *hose)
1705 hose->global_number); 1709 hose->global_number);
1706 return; 1710 return;
1707 } 1711 }
1708 bus->dev.of_node = of_node_get(node);
1709 bus->secondary = hose->first_busno; 1712 bus->secondary = hose->first_busno;
1710 hose->bus = bus; 1713 hose->bus = bus;
1711 1714
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index bedb370459f2..86585508e9c1 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -167,150 +167,26 @@ pcibios_make_OF_bus_map(void)
167#endif 167#endif
168} 168}
169 169
170typedef int (*pci_OF_scan_iterator)(struct device_node* node, void* data);
171
172static struct device_node*
173scan_OF_pci_childs(struct device_node *parent, pci_OF_scan_iterator filter, void* data)
174{
175 struct device_node *node;
176 struct device_node* sub_node;
177
178 for_each_child_of_node(parent, node) {
179 const unsigned int *class_code;
180
181 if (filter(node, data)) {
182 of_node_put(node);
183 return node;
184 }
185
186 /* For PCI<->PCI bridges or CardBus bridges, we go down
187 * Note: some OFs create a parent node "multifunc-device" as
188 * a fake root for all functions of a multi-function device,
189 * we go down them as well.
190 */
191 class_code = of_get_property(node, "class-code", NULL);
192 if ((!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
193 (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS)) &&
194 strcmp(node->name, "multifunc-device"))
195 continue;
196 sub_node = scan_OF_pci_childs(node, filter, data);
197 if (sub_node) {
198 of_node_put(node);
199 return sub_node;
200 }
201 }
202 return NULL;
203}
204
205static struct device_node *scan_OF_for_pci_dev(struct device_node *parent,
206 unsigned int devfn)
207{
208 struct device_node *np, *cnp;
209 const u32 *reg;
210 unsigned int psize;
211
212 for_each_child_of_node(parent, np) {
213 reg = of_get_property(np, "reg", &psize);
214 if (reg && psize >= 4 && ((reg[0] >> 8) & 0xff) == devfn)
215 return np;
216
217 /* Note: some OFs create a parent node "multifunc-device" as
218 * a fake root for all functions of a multi-function device,
219 * we go down them as well. */
220 if (!strcmp(np->name, "multifunc-device")) {
221 cnp = scan_OF_for_pci_dev(np, devfn);
222 if (cnp)
223 return cnp;
224 }
225 }
226 return NULL;
227}
228
229
230static struct device_node *scan_OF_for_pci_bus(struct pci_bus *bus)
231{
232 struct device_node *parent, *np;
233
234 /* Are we a root bus ? */
235 if (bus->self == NULL || bus->parent == NULL) {
236 struct pci_controller *hose = pci_bus_to_host(bus);
237 if (hose == NULL)
238 return NULL;
239 return of_node_get(hose->dn);
240 }
241
242 /* not a root bus, we need to get our parent */
243 parent = scan_OF_for_pci_bus(bus->parent);
244 if (parent == NULL)
245 return NULL;
246
247 /* now iterate for children for a match */
248 np = scan_OF_for_pci_dev(parent, bus->self->devfn);
249 of_node_put(parent);
250
251 return np;
252}
253
254/*
255 * Scans the OF tree for a device node matching a PCI device
256 */
257struct device_node *
258pci_busdev_to_OF_node(struct pci_bus *bus, int devfn)
259{
260 struct device_node *parent, *np;
261
262 pr_debug("pci_busdev_to_OF_node(%d,0x%x)\n", bus->number, devfn);
263 parent = scan_OF_for_pci_bus(bus);
264 if (parent == NULL)
265 return NULL;
266 pr_debug(" parent is %s\n", parent ? parent->full_name : "<NULL>");
267 np = scan_OF_for_pci_dev(parent, devfn);
268 of_node_put(parent);
269 pr_debug(" result is %s\n", np ? np->full_name : "<NULL>");
270
271 /* XXX most callers don't release the returned node
272 * mostly because ppc64 doesn't increase the refcount,
273 * we need to fix that.
274 */
275 return np;
276}
277EXPORT_SYMBOL(pci_busdev_to_OF_node);
278
279struct device_node*
280pci_device_to_OF_node(struct pci_dev *dev)
281{
282 return pci_busdev_to_OF_node(dev->bus, dev->devfn);
283}
284EXPORT_SYMBOL(pci_device_to_OF_node);
285
286static int
287find_OF_pci_device_filter(struct device_node* node, void* data)
288{
289 return ((void *)node == data);
290}
291 170
292/* 171/*
293 * Returns the PCI device matching a given OF node 172 * Returns the PCI device matching a given OF node
294 */ 173 */
295int 174int pci_device_from_OF_node(struct device_node *node, u8 *bus, u8 *devfn)
296pci_device_from_OF_node(struct device_node* node, u8* bus, u8* devfn)
297{ 175{
298 const unsigned int *reg; 176 struct pci_dev *dev = NULL;
299 struct pci_controller* hose; 177 const __be32 *reg;
300 struct pci_dev* dev = NULL; 178 int size;
301 179
302 /* Make sure it's really a PCI device */ 180 /* Check if it might have a chance to be a PCI device */
303 hose = pci_find_hose_for_OF_device(node); 181 if (!pci_find_hose_for_OF_device(node))
304 if (!hose || !hose->dn)
305 return -ENODEV;
306 if (!scan_OF_pci_childs(hose->dn,
307 find_OF_pci_device_filter, (void *)node))
308 return -ENODEV; 182 return -ENODEV;
309 reg = of_get_property(node, "reg", NULL); 183
310 if (!reg) 184 reg = of_get_property(node, "reg", &size);
185 if (!reg || size < 5 * sizeof(u32))
311 return -ENODEV; 186 return -ENODEV;
312 *bus = (reg[0] >> 16) & 0xff; 187
313 *devfn = ((reg[0] >> 8) & 0xff); 188 *bus = (be32_to_cpup(&reg[0]) >> 16) & 0xff;
189 *devfn = (be32_to_cpup(&reg[0]) >> 8) & 0xff;
314 190
315 /* Ok, here we need some tweak. If we have already renumbered 191 /* Ok, here we need some tweak. If we have already renumbered
316 * all busses, we can't rely on the OF bus number any more. 192 * all busses, we can't rely on the OF bus number any more.
diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c
index 6baabc13306a..478f8d78716b 100644
--- a/arch/powerpc/kernel/pci_dn.c
+++ b/arch/powerpc/kernel/pci_dn.c
@@ -142,53 +142,6 @@ void __devinit pci_devs_phb_init_dynamic(struct pci_controller *phb)
142 traverse_pci_devices(dn, update_dn_pci_info, phb); 142 traverse_pci_devices(dn, update_dn_pci_info, phb);
143} 143}
144 144
145/*
146 * Traversal func that looks for a <busno,devfcn> value.
147 * If found, the pci_dn is returned (thus terminating the traversal).
148 */
149static void *is_devfn_node(struct device_node *dn, void *data)
150{
151 int busno = ((unsigned long)data >> 8) & 0xff;
152 int devfn = ((unsigned long)data) & 0xff;
153 struct pci_dn *pci = dn->data;
154
155 if (pci && (devfn == pci->devfn) && (busno == pci->busno))
156 return dn;
157 return NULL;
158}
159
160/*
161 * This is the "slow" path for looking up a device_node from a
162 * pci_dev. It will hunt for the device under its parent's
163 * phb and then update of_node pointer.
164 *
165 * It may also do fixups on the actual device since this happens
166 * on the first read/write.
167 *
168 * Note that it also must deal with devices that don't exist.
169 * In this case it may probe for real hardware ("just in case")
170 * and add a device_node to the device tree if necessary.
171 *
172 * Is this function necessary anymore now that dev->dev.of_node is
173 * used to store the node pointer?
174 *
175 */
176struct device_node *fetch_dev_dn(struct pci_dev *dev)
177{
178 struct pci_controller *phb = dev->sysdata;
179 struct device_node *dn;
180 unsigned long searchval = (dev->bus->number << 8) | dev->devfn;
181
182 if (WARN_ON(!phb))
183 return NULL;
184
185 dn = traverse_pci_devices(phb->dn, is_devfn_node, (void *)searchval);
186 if (dn)
187 dev->dev.of_node = dn;
188 return dn;
189}
190EXPORT_SYMBOL(fetch_dev_dn);
191
192/** 145/**
193 * pci_devs_phb_init - Initialize phbs and pci devs under them. 146 * pci_devs_phb_init - Initialize phbs and pci devs under them.
194 * 147 *
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c
index 1e89a72fd030..fe0a5ad6f73e 100644
--- a/arch/powerpc/kernel/pci_of_scan.c
+++ b/arch/powerpc/kernel/pci_of_scan.c
@@ -202,9 +202,9 @@ EXPORT_SYMBOL(of_create_pci_dev);
202 * this routine in turn call of_scan_bus() recusively to scan for more child 202 * this routine in turn call of_scan_bus() recusively to scan for more child
203 * devices. 203 * devices.
204 */ 204 */
205void __devinit of_scan_pci_bridge(struct device_node *node, 205void __devinit of_scan_pci_bridge(struct pci_dev *dev)
206 struct pci_dev *dev)
207{ 206{
207 struct device_node *node = dev->dev.of_node;
208 struct pci_bus *bus; 208 struct pci_bus *bus;
209 const u32 *busrange, *ranges; 209 const u32 *busrange, *ranges;
210 int len, i, mode; 210 int len, i, mode;
@@ -238,7 +238,6 @@ void __devinit of_scan_pci_bridge(struct device_node *node,
238 bus->primary = dev->bus->number; 238 bus->primary = dev->bus->number;
239 bus->subordinate = busrange[1]; 239 bus->subordinate = busrange[1];
240 bus->bridge_ctl = 0; 240 bus->bridge_ctl = 0;
241 bus->dev.of_node = of_node_get(node);
242 241
243 /* parse ranges property */ 242 /* parse ranges property */
244 /* PCI #address-cells == 3 and #size-cells == 2 always */ 243 /* PCI #address-cells == 3 and #size-cells == 2 always */
@@ -335,9 +334,7 @@ static void __devinit __of_scan_bus(struct device_node *node,
335 list_for_each_entry(dev, &bus->devices, bus_list) { 334 list_for_each_entry(dev, &bus->devices, bus_list) {
336 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || 335 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
337 dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) { 336 dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) {
338 struct device_node *child = pci_device_to_OF_node(dev); 337 of_scan_pci_bridge(dev);
339 if (child)
340 of_scan_pci_bridge(child, dev);
341 } 338 }
342 } 339 }
343} 340}
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index 822f63008ae1..14967de98876 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -1207,7 +1207,7 @@ struct pmu power_pmu = {
1207 * here so there is no possibility of being interrupted. 1207 * here so there is no possibility of being interrupted.
1208 */ 1208 */
1209static void record_and_restart(struct perf_event *event, unsigned long val, 1209static void record_and_restart(struct perf_event *event, unsigned long val,
1210 struct pt_regs *regs, int nmi) 1210 struct pt_regs *regs)
1211{ 1211{
1212 u64 period = event->hw.sample_period; 1212 u64 period = event->hw.sample_period;
1213 s64 prev, delta, left; 1213 s64 prev, delta, left;
@@ -1258,7 +1258,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
1258 if (event->attr.sample_type & PERF_SAMPLE_ADDR) 1258 if (event->attr.sample_type & PERF_SAMPLE_ADDR)
1259 perf_get_data_addr(regs, &data.addr); 1259 perf_get_data_addr(regs, &data.addr);
1260 1260
1261 if (perf_event_overflow(event, nmi, &data, regs)) 1261 if (perf_event_overflow(event, &data, regs))
1262 power_pmu_stop(event, 0); 1262 power_pmu_stop(event, 0);
1263 } 1263 }
1264} 1264}
@@ -1346,7 +1346,7 @@ static void perf_event_interrupt(struct pt_regs *regs)
1346 if ((int)val < 0) { 1346 if ((int)val < 0) {
1347 /* event has overflowed */ 1347 /* event has overflowed */
1348 found = 1; 1348 found = 1;
1349 record_and_restart(event, val, regs, nmi); 1349 record_and_restart(event, val, regs);
1350 } 1350 }
1351 } 1351 }
1352 1352
diff --git a/arch/powerpc/kernel/perf_event_fsl_emb.c b/arch/powerpc/kernel/perf_event_fsl_emb.c
index b0dc8f7069cd..0a6d2a9d569c 100644
--- a/arch/powerpc/kernel/perf_event_fsl_emb.c
+++ b/arch/powerpc/kernel/perf_event_fsl_emb.c
@@ -568,7 +568,7 @@ static struct pmu fsl_emb_pmu = {
568 * here so there is no possibility of being interrupted. 568 * here so there is no possibility of being interrupted.
569 */ 569 */
570static void record_and_restart(struct perf_event *event, unsigned long val, 570static void record_and_restart(struct perf_event *event, unsigned long val,
571 struct pt_regs *regs, int nmi) 571 struct pt_regs *regs)
572{ 572{
573 u64 period = event->hw.sample_period; 573 u64 period = event->hw.sample_period;
574 s64 prev, delta, left; 574 s64 prev, delta, left;
@@ -616,7 +616,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
616 perf_sample_data_init(&data, 0); 616 perf_sample_data_init(&data, 0);
617 data.period = event->hw.last_period; 617 data.period = event->hw.last_period;
618 618
619 if (perf_event_overflow(event, nmi, &data, regs)) 619 if (perf_event_overflow(event, &data, regs))
620 fsl_emb_pmu_stop(event, 0); 620 fsl_emb_pmu_stop(event, 0);
621 } 621 }
622} 622}
@@ -644,7 +644,7 @@ static void perf_event_interrupt(struct pt_regs *regs)
644 if (event) { 644 if (event) {
645 /* event has overflowed */ 645 /* event has overflowed */
646 found = 1; 646 found = 1;
647 record_and_restart(event, val, regs, nmi); 647 record_and_restart(event, val, regs);
648 } else { 648 } else {
649 /* 649 /*
650 * Disabled counter is negative, 650 * Disabled counter is negative,
diff --git a/arch/powerpc/kernel/power4-pmu.c b/arch/powerpc/kernel/power4-pmu.c
index ead8b3c2649e..e9dbc2d35c9c 100644
--- a/arch/powerpc/kernel/power4-pmu.c
+++ b/arch/powerpc/kernel/power4-pmu.c
@@ -587,6 +587,11 @@ static int power4_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
587 [C(OP_WRITE)] = { -1, -1 }, 587 [C(OP_WRITE)] = { -1, -1 },
588 [C(OP_PREFETCH)] = { -1, -1 }, 588 [C(OP_PREFETCH)] = { -1, -1 },
589 }, 589 },
590 [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */
591 [C(OP_READ)] = { -1, -1 },
592 [C(OP_WRITE)] = { -1, -1 },
593 [C(OP_PREFETCH)] = { -1, -1 },
594 },
590}; 595};
591 596
592static struct power_pmu power4_pmu = { 597static struct power_pmu power4_pmu = {
diff --git a/arch/powerpc/kernel/power5+-pmu.c b/arch/powerpc/kernel/power5+-pmu.c
index eca0ac595cb6..f58a2bd41b59 100644
--- a/arch/powerpc/kernel/power5+-pmu.c
+++ b/arch/powerpc/kernel/power5+-pmu.c
@@ -653,6 +653,11 @@ static int power5p_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
653 [C(OP_WRITE)] = { -1, -1 }, 653 [C(OP_WRITE)] = { -1, -1 },
654 [C(OP_PREFETCH)] = { -1, -1 }, 654 [C(OP_PREFETCH)] = { -1, -1 },
655 }, 655 },
656 [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */
657 [C(OP_READ)] = { -1, -1 },
658 [C(OP_WRITE)] = { -1, -1 },
659 [C(OP_PREFETCH)] = { -1, -1 },
660 },
656}; 661};
657 662
658static struct power_pmu power5p_pmu = { 663static struct power_pmu power5p_pmu = {
diff --git a/arch/powerpc/kernel/power5-pmu.c b/arch/powerpc/kernel/power5-pmu.c
index d5ff0f64a5e6..b1acab684142 100644
--- a/arch/powerpc/kernel/power5-pmu.c
+++ b/arch/powerpc/kernel/power5-pmu.c
@@ -595,6 +595,11 @@ static int power5_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
595 [C(OP_WRITE)] = { -1, -1 }, 595 [C(OP_WRITE)] = { -1, -1 },
596 [C(OP_PREFETCH)] = { -1, -1 }, 596 [C(OP_PREFETCH)] = { -1, -1 },
597 }, 597 },
598 [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */
599 [C(OP_READ)] = { -1, -1 },
600 [C(OP_WRITE)] = { -1, -1 },
601 [C(OP_PREFETCH)] = { -1, -1 },
602 },
598}; 603};
599 604
600static struct power_pmu power5_pmu = { 605static struct power_pmu power5_pmu = {
diff --git a/arch/powerpc/kernel/power6-pmu.c b/arch/powerpc/kernel/power6-pmu.c
index 31603927e376..b24a3a23d073 100644
--- a/arch/powerpc/kernel/power6-pmu.c
+++ b/arch/powerpc/kernel/power6-pmu.c
@@ -516,6 +516,11 @@ static int power6_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
516 [C(OP_WRITE)] = { -1, -1 }, 516 [C(OP_WRITE)] = { -1, -1 },
517 [C(OP_PREFETCH)] = { -1, -1 }, 517 [C(OP_PREFETCH)] = { -1, -1 },
518 }, 518 },
519 [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */
520 [C(OP_READ)] = { -1, -1 },
521 [C(OP_WRITE)] = { -1, -1 },
522 [C(OP_PREFETCH)] = { -1, -1 },
523 },
519}; 524};
520 525
521static struct power_pmu power6_pmu = { 526static struct power_pmu power6_pmu = {
diff --git a/arch/powerpc/kernel/power7-pmu.c b/arch/powerpc/kernel/power7-pmu.c
index 593740fcb799..6d9dccb2ea59 100644
--- a/arch/powerpc/kernel/power7-pmu.c
+++ b/arch/powerpc/kernel/power7-pmu.c
@@ -342,6 +342,11 @@ static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
342 [C(OP_WRITE)] = { -1, -1 }, 342 [C(OP_WRITE)] = { -1, -1 },
343 [C(OP_PREFETCH)] = { -1, -1 }, 343 [C(OP_PREFETCH)] = { -1, -1 },
344 }, 344 },
345 [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */
346 [C(OP_READ)] = { -1, -1 },
347 [C(OP_WRITE)] = { -1, -1 },
348 [C(OP_PREFETCH)] = { -1, -1 },
349 },
345}; 350};
346 351
347static struct power_pmu power7_pmu = { 352static struct power_pmu power7_pmu = {
diff --git a/arch/powerpc/kernel/ppc970-pmu.c b/arch/powerpc/kernel/ppc970-pmu.c
index 9a6e093858fe..b121de9658eb 100644
--- a/arch/powerpc/kernel/ppc970-pmu.c
+++ b/arch/powerpc/kernel/ppc970-pmu.c
@@ -467,6 +467,11 @@ static int ppc970_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
467 [C(OP_WRITE)] = { -1, -1 }, 467 [C(OP_WRITE)] = { -1, -1 },
468 [C(OP_PREFETCH)] = { -1, -1 }, 468 [C(OP_PREFETCH)] = { -1, -1 },
469 }, 469 },
470 [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */
471 [C(OP_READ)] = { -1, -1 },
472 [C(OP_WRITE)] = { -1, -1 },
473 [C(OP_PREFETCH)] = { -1, -1 },
474 },
470}; 475};
471 476
472static struct power_pmu ppc970_pmu = { 477static struct power_pmu ppc970_pmu = {
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index cb22024f2b42..05b7dd217f60 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -882,7 +882,7 @@ void user_disable_single_step(struct task_struct *task)
882} 882}
883 883
884#ifdef CONFIG_HAVE_HW_BREAKPOINT 884#ifdef CONFIG_HAVE_HW_BREAKPOINT
885void ptrace_triggered(struct perf_event *bp, int nmi, 885void ptrace_triggered(struct perf_event *bp,
886 struct perf_sample_data *data, struct pt_regs *regs) 886 struct perf_sample_data *data, struct pt_regs *regs)
887{ 887{
888 struct perf_event_attr attr; 888 struct perf_event_attr attr;
@@ -973,7 +973,7 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
973 &attr.bp_type); 973 &attr.bp_type);
974 974
975 thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr, 975 thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
976 ptrace_triggered, task); 976 ptrace_triggered, NULL, task);
977 if (IS_ERR(bp)) { 977 if (IS_ERR(bp)) {
978 thread->ptrace_bps[0] = NULL; 978 thread->ptrace_bps[0] = NULL;
979 ptrace_put_breakpoints(task); 979 ptrace_put_breakpoints(task);
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index f33acfd872ad..03b29a6759ab 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -544,7 +544,7 @@ DEFINE_PER_CPU(u8, irq_work_pending);
544 544
545#endif /* 32 vs 64 bit */ 545#endif /* 32 vs 64 bit */
546 546
547void set_irq_work_pending(void) 547void arch_irq_work_raise(void)
548{ 548{
549 preempt_disable(); 549 preempt_disable();
550 set_irq_work_pending_flag(); 550 set_irq_work_pending_flag();
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index eeb42e06f2d7..78133deb4b64 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -129,6 +129,5 @@ config KVM_E500
129 If unsure, say N. 129 If unsure, say N.
130 130
131source drivers/vhost/Kconfig 131source drivers/vhost/Kconfig
132source drivers/virtio/Kconfig
133 132
134endif # VIRTUALIZATION 133endif # VIRTUALIZATION
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index ad35f66c69e8..5efe8c96d37f 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -174,7 +174,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
174 die("Weird page fault", regs, SIGSEGV); 174 die("Weird page fault", regs, SIGSEGV);
175 } 175 }
176 176
177 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); 177 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
178 178
179 /* When running in the kernel we expect faults to occur only to 179 /* When running in the kernel we expect faults to occur only to
180 * addresses in user space. All other faults represent errors in the 180 * addresses in user space. All other faults represent errors in the
@@ -320,7 +320,7 @@ good_area:
320 } 320 }
321 if (ret & VM_FAULT_MAJOR) { 321 if (ret & VM_FAULT_MAJOR) {
322 current->maj_flt++; 322 current->maj_flt++;
323 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, 323 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
324 regs, address); 324 regs, address);
325#ifdef CONFIG_PPC_SMLPAR 325#ifdef CONFIG_PPC_SMLPAR
326 if (firmware_has_feature(FW_FEATURE_CMO)) { 326 if (firmware_has_feature(FW_FEATURE_CMO)) {
@@ -331,7 +331,7 @@ good_area:
331#endif 331#endif
332 } else { 332 } else {
333 current->min_flt++; 333 current->min_flt++;
334 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, 334 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
335 regs, address); 335 regs, address);
336 } 336 }
337 up_read(&mm->mmap_sem); 337 up_read(&mm->mmap_sem);
diff --git a/arch/powerpc/net/Makefile b/arch/powerpc/net/Makefile
new file mode 100644
index 000000000000..266b3950c3ac
--- /dev/null
+++ b/arch/powerpc/net/Makefile
@@ -0,0 +1,4 @@
1#
2# Arch-specific network modules
3#
4obj-$(CONFIG_BPF_JIT) += bpf_jit_64.o bpf_jit_comp.o
diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
new file mode 100644
index 000000000000..af1ab5e9a691
--- /dev/null
+++ b/arch/powerpc/net/bpf_jit.h
@@ -0,0 +1,227 @@
1/* bpf_jit.h: BPF JIT compiler for PPC64
2 *
3 * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; version 2
8 * of the License.
9 */
10#ifndef _BPF_JIT_H
11#define _BPF_JIT_H
12
13#define BPF_PPC_STACK_LOCALS 32
14#define BPF_PPC_STACK_BASIC (48+64)
15#define BPF_PPC_STACK_SAVE (18*8)
16#define BPF_PPC_STACKFRAME (BPF_PPC_STACK_BASIC+BPF_PPC_STACK_LOCALS+ \
17 BPF_PPC_STACK_SAVE)
18#define BPF_PPC_SLOWPATH_FRAME (48+64)
19
20/*
21 * Generated code register usage:
22 *
23 * As normal PPC C ABI (e.g. r1=sp, r2=TOC), with:
24 *
25 * skb r3 (Entry parameter)
26 * A register r4
27 * X register r5
28 * addr param r6
29 * r7-r10 scratch
30 * skb->data r14
31 * skb headlen r15 (skb->len - skb->data_len)
32 * m[0] r16
33 * m[...] ...
34 * m[15] r31
35 */
36#define r_skb 3
37#define r_ret 3
38#define r_A 4
39#define r_X 5
40#define r_addr 6
41#define r_scratch1 7
42#define r_D 14
43#define r_HL 15
44#define r_M 16
45
46#ifndef __ASSEMBLY__
47
48/*
49 * Assembly helpers from arch/powerpc/net/bpf_jit.S:
50 */
51extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[];
52
53#define FUNCTION_DESCR_SIZE 24
54
55/*
56 * 16-bit immediate helper macros: HA() is for use with sign-extending instrs
57 * (e.g. LD, ADDI). If the bottom 16 bits is "-ve", add another bit into the
58 * top half to negate the effect (i.e. 0xffff + 1 = 0x(1)0000).
59 */
60#define IMM_H(i) ((uintptr_t)(i)>>16)
61#define IMM_HA(i) (((uintptr_t)(i)>>16) + \
62 (((uintptr_t)(i) & 0x8000) >> 15))
63#define IMM_L(i) ((uintptr_t)(i) & 0xffff)
64
65#define PLANT_INSTR(d, idx, instr) \
66 do { if (d) { (d)[idx] = instr; } idx++; } while (0)
67#define EMIT(instr) PLANT_INSTR(image, ctx->idx, instr)
68
69#define PPC_NOP() EMIT(PPC_INST_NOP)
70#define PPC_BLR() EMIT(PPC_INST_BLR)
71#define PPC_BLRL() EMIT(PPC_INST_BLRL)
72#define PPC_MTLR(r) EMIT(PPC_INST_MTLR | __PPC_RT(r))
73#define PPC_ADDI(d, a, i) EMIT(PPC_INST_ADDI | __PPC_RT(d) | \
74 __PPC_RA(a) | IMM_L(i))
75#define PPC_MR(d, a) PPC_OR(d, a, a)
76#define PPC_LI(r, i) PPC_ADDI(r, 0, i)
77#define PPC_ADDIS(d, a, i) EMIT(PPC_INST_ADDIS | \
78 __PPC_RS(d) | __PPC_RA(a) | IMM_L(i))
79#define PPC_LIS(r, i) PPC_ADDIS(r, 0, i)
80#define PPC_STD(r, base, i) EMIT(PPC_INST_STD | __PPC_RS(r) | \
81 __PPC_RA(base) | ((i) & 0xfffc))
82
83#define PPC_LD(r, base, i) EMIT(PPC_INST_LD | __PPC_RT(r) | \
84 __PPC_RA(base) | IMM_L(i))
85#define PPC_LWZ(r, base, i) EMIT(PPC_INST_LWZ | __PPC_RT(r) | \
86 __PPC_RA(base) | IMM_L(i))
87#define PPC_LHZ(r, base, i) EMIT(PPC_INST_LHZ | __PPC_RT(r) | \
88 __PPC_RA(base) | IMM_L(i))
89/* Convenience helpers for the above with 'far' offsets: */
90#define PPC_LD_OFFS(r, base, i) do { if ((i) < 32768) PPC_LD(r, base, i); \
91 else { PPC_ADDIS(r, base, IMM_HA(i)); \
92 PPC_LD(r, r, IMM_L(i)); } } while(0)
93
94#define PPC_LWZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LWZ(r, base, i); \
95 else { PPC_ADDIS(r, base, IMM_HA(i)); \
96 PPC_LWZ(r, r, IMM_L(i)); } } while(0)
97
98#define PPC_LHZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LHZ(r, base, i); \
99 else { PPC_ADDIS(r, base, IMM_HA(i)); \
100 PPC_LHZ(r, r, IMM_L(i)); } } while(0)
101
102#define PPC_CMPWI(a, i) EMIT(PPC_INST_CMPWI | __PPC_RA(a) | IMM_L(i))
103#define PPC_CMPDI(a, i) EMIT(PPC_INST_CMPDI | __PPC_RA(a) | IMM_L(i))
104#define PPC_CMPLWI(a, i) EMIT(PPC_INST_CMPLWI | __PPC_RA(a) | IMM_L(i))
105#define PPC_CMPLW(a, b) EMIT(PPC_INST_CMPLW | __PPC_RA(a) | __PPC_RB(b))
106
107#define PPC_SUB(d, a, b) EMIT(PPC_INST_SUB | __PPC_RT(d) | \
108 __PPC_RB(a) | __PPC_RA(b))
109#define PPC_ADD(d, a, b) EMIT(PPC_INST_ADD | __PPC_RT(d) | \
110 __PPC_RA(a) | __PPC_RB(b))
111#define PPC_MUL(d, a, b) EMIT(PPC_INST_MULLW | __PPC_RT(d) | \
112 __PPC_RA(a) | __PPC_RB(b))
113#define PPC_MULHWU(d, a, b) EMIT(PPC_INST_MULHWU | __PPC_RT(d) | \
114 __PPC_RA(a) | __PPC_RB(b))
115#define PPC_MULI(d, a, i) EMIT(PPC_INST_MULLI | __PPC_RT(d) | \
116 __PPC_RA(a) | IMM_L(i))
117#define PPC_DIVWU(d, a, b) EMIT(PPC_INST_DIVWU | __PPC_RT(d) | \
118 __PPC_RA(a) | __PPC_RB(b))
119#define PPC_AND(d, a, b) EMIT(PPC_INST_AND | __PPC_RA(d) | \
120 __PPC_RS(a) | __PPC_RB(b))
121#define PPC_ANDI(d, a, i) EMIT(PPC_INST_ANDI | __PPC_RA(d) | \
122 __PPC_RS(a) | IMM_L(i))
123#define PPC_AND_DOT(d, a, b) EMIT(PPC_INST_ANDDOT | __PPC_RA(d) | \
124 __PPC_RS(a) | __PPC_RB(b))
125#define PPC_OR(d, a, b) EMIT(PPC_INST_OR | __PPC_RA(d) | \
126 __PPC_RS(a) | __PPC_RB(b))
127#define PPC_ORI(d, a, i) EMIT(PPC_INST_ORI | __PPC_RA(d) | \
128 __PPC_RS(a) | IMM_L(i))
129#define PPC_ORIS(d, a, i) EMIT(PPC_INST_ORIS | __PPC_RA(d) | \
130 __PPC_RS(a) | IMM_L(i))
131#define PPC_SLW(d, a, s) EMIT(PPC_INST_SLW | __PPC_RA(d) | \
132 __PPC_RS(a) | __PPC_RB(s))
133#define PPC_SRW(d, a, s) EMIT(PPC_INST_SRW | __PPC_RA(d) | \
134 __PPC_RS(a) | __PPC_RB(s))
135/* slwi = rlwinm Rx, Ry, n, 0, 31-n */
136#define PPC_SLWI(d, a, i) EMIT(PPC_INST_RLWINM | __PPC_RA(d) | \
137 __PPC_RS(a) | __PPC_SH(i) | \
138 __PPC_MB(0) | __PPC_ME(31-(i)))
139/* srwi = rlwinm Rx, Ry, 32-n, n, 31 */
140#define PPC_SRWI(d, a, i) EMIT(PPC_INST_RLWINM | __PPC_RA(d) | \
141 __PPC_RS(a) | __PPC_SH(32-(i)) | \
142 __PPC_MB(i) | __PPC_ME(31))
143/* sldi = rldicr Rx, Ry, n, 63-n */
144#define PPC_SLDI(d, a, i) EMIT(PPC_INST_RLDICR | __PPC_RA(d) | \
145 __PPC_RS(a) | __PPC_SH(i) | \
146 __PPC_MB(63-(i)) | (((i) & 0x20) >> 4))
147#define PPC_NEG(d, a) EMIT(PPC_INST_NEG | __PPC_RT(d) | __PPC_RA(a))
148
149/* Long jump; (unconditional 'branch') */
150#define PPC_JMP(dest) EMIT(PPC_INST_BRANCH | \
151 (((dest) - (ctx->idx * 4)) & 0x03fffffc))
152/* "cond" here covers BO:BI fields. */
153#define PPC_BCC_SHORT(cond, dest) EMIT(PPC_INST_BRANCH_COND | \
154 (((cond) & 0x3ff) << 16) | \
155 (((dest) - (ctx->idx * 4)) & \
156 0xfffc))
157#define PPC_LI32(d, i) do { PPC_LI(d, IMM_L(i)); \
158 if ((u32)(uintptr_t)(i) >= 32768) { \
159 PPC_ADDIS(d, d, IMM_HA(i)); \
160 } } while(0)
161#define PPC_LI64(d, i) do { \
162 if (!((uintptr_t)(i) & 0xffffffff00000000ULL)) \
163 PPC_LI32(d, i); \
164 else { \
165 PPC_LIS(d, ((uintptr_t)(i) >> 48)); \
166 if ((uintptr_t)(i) & 0x0000ffff00000000ULL) \
167 PPC_ORI(d, d, \
168 ((uintptr_t)(i) >> 32) & 0xffff); \
169 PPC_SLDI(d, d, 32); \
170 if ((uintptr_t)(i) & 0x00000000ffff0000ULL) \
171 PPC_ORIS(d, d, \
172 ((uintptr_t)(i) >> 16) & 0xffff); \
173 if ((uintptr_t)(i) & 0x000000000000ffffULL) \
174 PPC_ORI(d, d, (uintptr_t)(i) & 0xffff); \
175 } } while (0);
176
177static inline bool is_nearbranch(int offset)
178{
179 return (offset < 32768) && (offset >= -32768);
180}
181
182/*
183 * The fly in the ointment of code size changing from pass to pass is
184 * avoided by padding the short branch case with a NOP. If code size differs
185 * with different branch reaches we will have the issue of code moving from
186 * one pass to the next and will need a few passes to converge on a stable
187 * state.
188 */
189#define PPC_BCC(cond, dest) do { \
190 if (is_nearbranch((dest) - (ctx->idx * 4))) { \
191 PPC_BCC_SHORT(cond, dest); \
192 PPC_NOP(); \
193 } else { \
194 /* Flip the 'T or F' bit to invert comparison */ \
195 PPC_BCC_SHORT(cond ^ COND_CMP_TRUE, (ctx->idx+2)*4); \
196 PPC_JMP(dest); \
197 } } while(0)
198
199/* To create a branch condition, select a bit of cr0... */
200#define CR0_LT 0
201#define CR0_GT 1
202#define CR0_EQ 2
203/* ...and modify BO[3] */
204#define COND_CMP_TRUE 0x100
205#define COND_CMP_FALSE 0x000
206/* Together, they make all required comparisons: */
207#define COND_GT (CR0_GT | COND_CMP_TRUE)
208#define COND_GE (CR0_LT | COND_CMP_FALSE)
209#define COND_EQ (CR0_EQ | COND_CMP_TRUE)
210#define COND_NE (CR0_EQ | COND_CMP_FALSE)
211#define COND_LT (CR0_LT | COND_CMP_TRUE)
212
213#define SEEN_DATAREF 0x10000 /* might call external helpers */
214#define SEEN_XREG 0x20000 /* X reg is used */
215#define SEEN_MEM 0x40000 /* SEEN_MEM+(1<<n) = use mem[n] for temporary
216 * storage */
217#define SEEN_MEM_MSK 0x0ffff
218
219struct codegen_context {
220 unsigned int seen;
221 unsigned int idx;
222 int pc_ret0; /* bpf index of first RET #0 instruction (if any) */
223};
224
225#endif
226
227#endif
diff --git a/arch/powerpc/net/bpf_jit_64.S b/arch/powerpc/net/bpf_jit_64.S
new file mode 100644
index 000000000000..ff4506e85cce
--- /dev/null
+++ b/arch/powerpc/net/bpf_jit_64.S
@@ -0,0 +1,138 @@
1/* bpf_jit.S: Packet/header access helper functions
2 * for PPC64 BPF compiler.
3 *
4 * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; version 2
9 * of the License.
10 */
11
12#include <asm/ppc_asm.h>
13#include "bpf_jit.h"
14
15/*
16 * All of these routines are called directly from generated code,
17 * whose register usage is:
18 *
19 * r3 skb
20 * r4,r5 A,X
21 * r6 *** address parameter to helper ***
22 * r7-r10 scratch
23 * r14 skb->data
24 * r15 skb headlen
25 * r16-31 M[]
26 */
27
28/*
29 * To consider: These helpers are so small it could be better to just
30 * generate them inline. Inline code can do the simple headlen check
31 * then branch directly to slow_path_XXX if required. (In fact, could
32 * load a spare GPR with the address of slow_path_generic and pass size
33 * as an argument, making the call site a mtlr, li and bllr.)
34 *
35 * Technically, the "is addr < 0" check is unnecessary & slowing down
36 * the ABS path, as it's statically checked on generation.
37 */
38 .globl sk_load_word
39sk_load_word:
40 cmpdi r_addr, 0
41 blt bpf_error
42 /* Are we accessing past headlen? */
43 subi r_scratch1, r_HL, 4
44 cmpd r_scratch1, r_addr
45 blt bpf_slow_path_word
46 /* Nope, just hitting the header. cr0 here is eq or gt! */
47 lwzx r_A, r_D, r_addr
48 /* When big endian we don't need to byteswap. */
49 blr /* Return success, cr0 != LT */
50
51 .globl sk_load_half
52sk_load_half:
53 cmpdi r_addr, 0
54 blt bpf_error
55 subi r_scratch1, r_HL, 2
56 cmpd r_scratch1, r_addr
57 blt bpf_slow_path_half
58 lhzx r_A, r_D, r_addr
59 blr
60
61 .globl sk_load_byte
62sk_load_byte:
63 cmpdi r_addr, 0
64 blt bpf_error
65 cmpd r_HL, r_addr
66 ble bpf_slow_path_byte
67 lbzx r_A, r_D, r_addr
68 blr
69
70/*
71 * BPF_S_LDX_B_MSH: ldxb 4*([offset]&0xf)
72 * r_addr is the offset value, already known positive
73 */
74 .globl sk_load_byte_msh
75sk_load_byte_msh:
76 cmpd r_HL, r_addr
77 ble bpf_slow_path_byte_msh
78 lbzx r_X, r_D, r_addr
79 rlwinm r_X, r_X, 2, 32-4-2, 31-2
80 blr
81
82bpf_error:
83 /* Entered with cr0 = lt */
84 li r3, 0
85 /* Generated code will 'blt epilogue', returning 0. */
86 blr
87
88/* Call out to skb_copy_bits:
89 * We'll need to back up our volatile regs first; we have
90 * local variable space at r1+(BPF_PPC_STACK_BASIC).
91 * Allocate a new stack frame here to remain ABI-compliant in
92 * stashing LR.
93 */
94#define bpf_slow_path_common(SIZE) \
95 mflr r0; \
96 std r0, 16(r1); \
97 /* R3 goes in parameter space of caller's frame */ \
98 std r_skb, (BPF_PPC_STACKFRAME+48)(r1); \
99 std r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1); \
100 std r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1); \
101 addi r5, r1, BPF_PPC_STACK_BASIC+(2*8); \
102 stdu r1, -BPF_PPC_SLOWPATH_FRAME(r1); \
103 /* R3 = r_skb, as passed */ \
104 mr r4, r_addr; \
105 li r6, SIZE; \
106 bl skb_copy_bits; \
107 /* R3 = 0 on success */ \
108 addi r1, r1, BPF_PPC_SLOWPATH_FRAME; \
109 ld r0, 16(r1); \
110 ld r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1); \
111 ld r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1); \
112 mtlr r0; \
113 cmpdi r3, 0; \
114 blt bpf_error; /* cr0 = LT */ \
115 ld r_skb, (BPF_PPC_STACKFRAME+48)(r1); \
116 /* Great success! */
117
118bpf_slow_path_word:
119 bpf_slow_path_common(4)
120 /* Data value is on stack, and cr0 != LT */
121 lwz r_A, BPF_PPC_STACK_BASIC+(2*8)(r1)
122 blr
123
124bpf_slow_path_half:
125 bpf_slow_path_common(2)
126 lhz r_A, BPF_PPC_STACK_BASIC+(2*8)(r1)
127 blr
128
129bpf_slow_path_byte:
130 bpf_slow_path_common(1)
131 lbz r_A, BPF_PPC_STACK_BASIC+(2*8)(r1)
132 blr
133
134bpf_slow_path_byte_msh:
135 bpf_slow_path_common(1)
136 lbz r_X, BPF_PPC_STACK_BASIC+(2*8)(r1)
137 rlwinm r_X, r_X, 2, 32-4-2, 31-2
138 blr
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
new file mode 100644
index 000000000000..73619d3aeb6c
--- /dev/null
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -0,0 +1,694 @@
1/* bpf_jit_comp.c: BPF JIT compiler for PPC64
2 *
3 * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
4 *
5 * Based on the x86 BPF compiler, by Eric Dumazet (eric.dumazet@gmail.com)
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; version 2
10 * of the License.
11 */
12#include <linux/moduleloader.h>
13#include <asm/cacheflush.h>
14#include <linux/netdevice.h>
15#include <linux/filter.h>
16#include "bpf_jit.h"
17
18#ifndef __BIG_ENDIAN
19/* There are endianness assumptions herein. */
20#error "Little-endian PPC not supported in BPF compiler"
21#endif
22
23int bpf_jit_enable __read_mostly;
24
25
/*
 * Make freshly written JIT code visible to instruction fetch:
 * order the image stores (smp_wmb) before flushing the byte range
 * from the data cache and invalidating the icache.
 */
26static inline void bpf_flush_icache(void *start, void *end)
27{
28 smp_wmb();
29 flush_icache_range((unsigned long)start, (unsigned long)end);
30}
31
/*
 * Emit the function prologue (or, when image == NULL on the sizing
 * pass, just advance ctx->idx).  ctx->seen — accumulated by the body
 * pass — selects only the save/setup code this filter needs: a stack
 * frame and LR/non-volatile saves for helper calls (SEEN_DATAREF),
 * per-slot saves of r_M..r_M+15 for scratch memory (SEEN_MEM),
 * zeroing of X (SEEN_XREG), and preloading r_HL (length of linear
 * data) and r_D (skb->data) for packet loads.
 */
32static void bpf_jit_build_prologue(struct sk_filter *fp, u32 *image,
33 struct codegen_context *ctx)
34{
35 int i;
36 const struct sock_filter *filter = fp->insns;
37
38 if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
39 /* Make stackframe */
40 if (ctx->seen & SEEN_DATAREF) {
41 /* If we call any helpers (for loads), save LR */
42 EMIT(PPC_INST_MFLR | __PPC_RT(0));
43 PPC_STD(0, 1, 16);
44
45 /* Back up non-volatile regs. */
46 PPC_STD(r_D, 1, -(8*(32-r_D)));
47 PPC_STD(r_HL, 1, -(8*(32-r_HL)));
48 }
49 if (ctx->seen & SEEN_MEM) {
50 /*
51 * Conditionally save regs r15-r31 as some will be used
52 * for M[] data.
53 */
54 for (i = r_M; i < (r_M+16); i++) {
55 if (ctx->seen & (1 << (i-r_M)))
56 PPC_STD(i, 1, -(8*(32-i)));
57 }
58 }
 /* Atomically establish the frame; saves above used negative offsets */
59 EMIT(PPC_INST_STDU | __PPC_RS(1) | __PPC_RA(1) |
60 (-BPF_PPC_STACKFRAME & 0xfffc));
61 }
62
63 if (ctx->seen & SEEN_DATAREF) {
64 /*
65 * If this filter needs to access skb data,
66 * prepare r_D and r_HL:
67 * r_HL = skb->len - skb->data_len
68 * r_D = skb->data
69 */
70 PPC_LWZ_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
71 data_len));
72 PPC_LWZ_OFFS(r_HL, r_skb, offsetof(struct sk_buff, len));
73 PPC_SUB(r_HL, r_HL, r_scratch1);
74 PPC_LD_OFFS(r_D, r_skb, offsetof(struct sk_buff, data));
75 }
76
77 if (ctx->seen & SEEN_XREG) {
78 /*
79 * TODO: Could also detect whether first instr. sets X and
80 * avoid this (as below, with A).
81 */
82 PPC_LI(r_X, 0);
83 }
84
 /* Skip zeroing A when the first instruction overwrites it anyway */
85 switch (filter[0].code) {
86 case BPF_S_RET_K:
87 case BPF_S_LD_W_LEN:
88 case BPF_S_ANC_PROTOCOL:
89 case BPF_S_ANC_IFINDEX:
90 case BPF_S_ANC_MARK:
91 case BPF_S_ANC_RXHASH:
92 case BPF_S_ANC_CPU:
93 case BPF_S_ANC_QUEUE:
94 case BPF_S_LD_W_ABS:
95 case BPF_S_LD_H_ABS:
96 case BPF_S_LD_B_ABS:
97 /* first instruction sets A register (or is RET 'constant') */
98 break;
99 default:
100 /* make sure we dont leak kernel information to user */
101 PPC_LI(r_A, 0);
102 }
103}
104
/*
 * Emit the epilogue: tear down the stack frame (if the prologue made
 * one), restore LR and any saved non-volatile registers, and return.
 * Must mirror bpf_jit_build_prologue exactly, driven by the same
 * ctx->seen bits.  The filter's return value is already in r3.
 */
105static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
106{
107 int i;
108
109 if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
110 PPC_ADDI(1, 1, BPF_PPC_STACKFRAME);
111 if (ctx->seen & SEEN_DATAREF) {
112 PPC_LD(0, 1, 16);
113 PPC_MTLR(0);
114 PPC_LD(r_D, 1, -(8*(32-r_D)));
115 PPC_LD(r_HL, 1, -(8*(32-r_HL)));
116 }
117 if (ctx->seen & SEEN_MEM) {
118 /* Restore any saved non-vol registers */
119 for (i = r_M; i < (r_M+16); i++) {
120 if (ctx->seen & (1 << (i-r_M)))
121 PPC_LD(i, 1, -(8*(32-i)));
122 }
123 }
124 }
125 /* The RETs have left a return value in R3. */
126
127 PPC_BLR();
128}
129
130/* Assemble the body code between the prologue & epilogue. */
/*
 * Translate every BPF instruction of @fp into PPC64 machine code
 * (or, with image == NULL, just measure it).  Fills addrs[i] with the
 * body-relative byte offset of each BPF instruction so branches can
 * be resolved on later passes, and accumulates SEEN_* feature bits in
 * ctx->seen for the prologue/epilogue.  Returns 0, or -ENOTSUPP for
 * opcodes/operands the JIT cannot handle (caller then falls back to
 * the interpreter).
 *
 * NOTE: statement order here is load-bearing — the number of words
 * emitted per opcode must be identical across passes so that addrs[]
 * converges; see the pass description in bpf_jit_compile().
 */
131static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
132 struct codegen_context *ctx,
133 unsigned int *addrs)
134{
135 const struct sock_filter *filter = fp->insns;
136 int flen = fp->len;
137 u8 *func;
138 unsigned int true_cond;
139 int i;
140
141 /* Start of epilogue code */
142 unsigned int exit_addr = addrs[flen];
143
144 for (i = 0; i < flen; i++) {
145 unsigned int K = filter[i].k;
146
147 /*
148 * addrs[] maps a BPF bytecode address into a real offset from
149 * the start of the body code.
150 */
151 addrs[i] = ctx->idx * 4;
152
153 switch (filter[i].code) {
154 /*** ALU ops ***/
155 case BPF_S_ALU_ADD_X: /* A += X; */
156 ctx->seen |= SEEN_XREG;
157 PPC_ADD(r_A, r_A, r_X);
158 break;
159 case BPF_S_ALU_ADD_K: /* A += K; */
160 if (!K)
161 break;
162 PPC_ADDI(r_A, r_A, IMM_L(K));
163 if (K >= 32768)
164 PPC_ADDIS(r_A, r_A, IMM_HA(K));
165 break;
166 case BPF_S_ALU_SUB_X: /* A -= X; */
167 ctx->seen |= SEEN_XREG;
168 PPC_SUB(r_A, r_A, r_X);
169 break;
170 case BPF_S_ALU_SUB_K: /* A -= K */
171 if (!K)
172 break;
173 PPC_ADDI(r_A, r_A, IMM_L(-K));
174 if (K >= 32768)
175 PPC_ADDIS(r_A, r_A, IMM_HA(-K));
176 break;
177 case BPF_S_ALU_MUL_X: /* A *= X; */
178 ctx->seen |= SEEN_XREG;
179 PPC_MUL(r_A, r_A, r_X);
180 break;
181 case BPF_S_ALU_MUL_K: /* A *= K */
182 if (K < 32768)
183 PPC_MULI(r_A, r_A, K);
184 else {
185 PPC_LI32(r_scratch1, K);
186 PPC_MUL(r_A, r_A, r_scratch1);
187 }
188 break;
189 case BPF_S_ALU_DIV_X: /* A /= X; */
190 ctx->seen |= SEEN_XREG;
 /* Division by zero returns 0 from the filter, per BPF semantics */
191 PPC_CMPWI(r_X, 0);
192 if (ctx->pc_ret0 != -1) {
193 PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
194 } else {
195 /*
196 * Exit, returning 0; first pass hits here
197 * (longer worst-case code size).
198 */
199 PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
200 PPC_LI(r_ret, 0);
201 PPC_JMP(exit_addr);
202 }
203 PPC_DIVWU(r_A, r_A, r_X);
204 break;
205 case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
206 PPC_LI32(r_scratch1, K);
207 /* Top 32 bits of 64bit result -> A */
208 PPC_MULHWU(r_A, r_A, r_scratch1);
209 break;
210 case BPF_S_ALU_AND_X:
211 ctx->seen |= SEEN_XREG;
212 PPC_AND(r_A, r_A, r_X);
213 break;
214 case BPF_S_ALU_AND_K:
215 if (!IMM_H(K))
216 PPC_ANDI(r_A, r_A, K);
217 else {
218 PPC_LI32(r_scratch1, K);
219 PPC_AND(r_A, r_A, r_scratch1);
220 }
221 break;
222 case BPF_S_ALU_OR_X:
223 ctx->seen |= SEEN_XREG;
224 PPC_OR(r_A, r_A, r_X);
225 break;
226 case BPF_S_ALU_OR_K:
227 if (IMM_L(K))
228 PPC_ORI(r_A, r_A, IMM_L(K));
229 if (K >= 65536)
230 PPC_ORIS(r_A, r_A, IMM_H(K));
231 break;
232 case BPF_S_ALU_LSH_X: /* A <<= X; */
233 ctx->seen |= SEEN_XREG;
234 PPC_SLW(r_A, r_A, r_X);
235 break;
236 case BPF_S_ALU_LSH_K:
237 if (K == 0)
238 break;
239 else
240 PPC_SLWI(r_A, r_A, K);
241 break;
242 case BPF_S_ALU_RSH_X: /* A >>= X; */
243 ctx->seen |= SEEN_XREG;
244 PPC_SRW(r_A, r_A, r_X);
245 break;
246 case BPF_S_ALU_RSH_K: /* A >>= K; */
247 if (K == 0)
248 break;
249 else
250 PPC_SRWI(r_A, r_A, K);
251 break;
252 case BPF_S_ALU_NEG:
253 PPC_NEG(r_A, r_A);
254 break;
255 case BPF_S_RET_K:
256 PPC_LI32(r_ret, K);
 /* Remember the first 'ret 0' so later error paths can branch to it */
257 if (!K) {
258 if (ctx->pc_ret0 == -1)
259 ctx->pc_ret0 = i;
260 }
261 /*
262 * If this isn't the very last instruction, branch to
263 * the epilogue if we've stuff to clean up. Otherwise,
264 * if there's nothing to tidy, just return. If we /are/
265 * the last instruction, we're about to fall through to
266 * the epilogue to return.
267 */
268 if (i != flen - 1) {
269 /*
270 * Note: 'seen' is properly valid only on pass
271 * #2. Both parts of this conditional are the
272 * same instruction size though, meaning the
273 * first pass will still correctly determine the
274 * code size/addresses.
275 */
276 if (ctx->seen)
277 PPC_JMP(exit_addr);
278 else
279 PPC_BLR();
280 }
281 break;
282 case BPF_S_RET_A:
283 PPC_MR(r_ret, r_A);
284 if (i != flen - 1) {
285 if (ctx->seen)
286 PPC_JMP(exit_addr);
287 else
288 PPC_BLR();
289 }
290 break;
291 case BPF_S_MISC_TAX: /* X = A */
292 PPC_MR(r_X, r_A);
293 break;
294 case BPF_S_MISC_TXA: /* A = X */
295 ctx->seen |= SEEN_XREG;
296 PPC_MR(r_A, r_X);
297 break;
298
299 /*** Constant loads/M[] access ***/
300 case BPF_S_LD_IMM: /* A = K */
301 PPC_LI32(r_A, K);
302 break;
303 case BPF_S_LDX_IMM: /* X = K */
304 PPC_LI32(r_X, K);
305 break;
306 case BPF_S_LD_MEM: /* A = mem[K] */
307 PPC_MR(r_A, r_M + (K & 0xf));
308 ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
309 break;
310 case BPF_S_LDX_MEM: /* X = mem[K] */
311 PPC_MR(r_X, r_M + (K & 0xf));
312 ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
313 break;
314 case BPF_S_ST: /* mem[K] = A */
315 PPC_MR(r_M + (K & 0xf), r_A);
316 ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
317 break;
318 case BPF_S_STX: /* mem[K] = X */
319 PPC_MR(r_M + (K & 0xf), r_X);
320 ctx->seen |= SEEN_XREG | SEEN_MEM | (1<<(K & 0xf));
321 break;
322 case BPF_S_LD_W_LEN: /* A = skb->len; */
323 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
324 PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len));
325 break;
326 case BPF_S_LDX_W_LEN: /* X = skb->len; */
327 PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len));
328 break;
329
330 /*** Ancillary info loads ***/
331
332 /* None of the BPF_S_ANC* codes appear to be passed by
333 * sk_chk_filter(). The interpreter and the x86 BPF
334 * compiler implement them so we do too -- they may be
335 * planted in future.
336 */
337 case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */
338 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
339 protocol) != 2);
340 PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
341 protocol));
342 /* ntohs is a NOP with BE loads. */
343 break;
344 case BPF_S_ANC_IFINDEX:
345 PPC_LD_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
346 dev));
 /* A NULL skb->dev means 'return 0', like the interpreter */
347 PPC_CMPDI(r_scratch1, 0);
348 if (ctx->pc_ret0 != -1) {
349 PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
350 } else {
351 /* Exit, returning 0; first pass hits here. */
352 PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
353 PPC_LI(r_ret, 0);
354 PPC_JMP(exit_addr);
355 }
356 BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
357 ifindex) != 4);
358 PPC_LWZ_OFFS(r_A, r_scratch1,
359 offsetof(struct net_device, ifindex));
360 break;
361 case BPF_S_ANC_MARK:
362 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
363 PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
364 mark));
365 break;
366 case BPF_S_ANC_RXHASH:
367 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, rxhash) != 4);
368 PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
369 rxhash));
370 break;
371 case BPF_S_ANC_QUEUE:
372 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
373 queue_mapping) != 2);
374 PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
375 queue_mapping));
376 break;
377 case BPF_S_ANC_CPU:
378#ifdef CONFIG_SMP
379 /*
380 * PACA ptr is r13:
381 * raw_smp_processor_id() = local_paca->paca_index
382 */
383 BUILD_BUG_ON(FIELD_SIZEOF(struct paca_struct,
384 paca_index) != 2);
385 PPC_LHZ_OFFS(r_A, 13,
386 offsetof(struct paca_struct, paca_index));
387#else
388 PPC_LI(r_A, 0);
389#endif
390 break;
391
392 /*** Absolute loads from packet header/data ***/
393 case BPF_S_LD_W_ABS:
394 func = sk_load_word;
395 goto common_load;
396 case BPF_S_LD_H_ABS:
397 func = sk_load_half;
398 goto common_load;
399 case BPF_S_LD_B_ABS:
400 func = sk_load_byte;
401 common_load:
402 /*
403 * Load from [K]. Reference with the (negative)
404 * SKF_NET_OFF/SKF_LL_OFF offsets is unsupported.
405 */
406 ctx->seen |= SEEN_DATAREF;
407 if ((int)K < 0)
408 return -ENOTSUPP;
409 PPC_LI64(r_scratch1, func);
410 PPC_MTLR(r_scratch1);
411 PPC_LI32(r_addr, K);
412 PPC_BLRL();
413 /*
414 * Helper returns 'lt' condition on error, and an
415 * appropriate return value in r3
416 */
417 PPC_BCC(COND_LT, exit_addr);
418 break;
419
420 /*** Indirect loads from packet header/data ***/
421 case BPF_S_LD_W_IND:
422 func = sk_load_word;
423 goto common_load_ind;
424 case BPF_S_LD_H_IND:
425 func = sk_load_half;
426 goto common_load_ind;
427 case BPF_S_LD_B_IND:
428 func = sk_load_byte;
429 common_load_ind:
430 /*
431 * Load from [X + K]. Negative offsets are tested for
432 * in the helper functions, and result in a 'ret 0'.
433 */
434 ctx->seen |= SEEN_DATAREF | SEEN_XREG;
435 PPC_LI64(r_scratch1, func);
436 PPC_MTLR(r_scratch1);
437 PPC_ADDI(r_addr, r_X, IMM_L(K));
438 if (K >= 32768)
439 PPC_ADDIS(r_addr, r_addr, IMM_HA(K));
440 PPC_BLRL();
441 /* If error, cr0.LT set */
442 PPC_BCC(COND_LT, exit_addr);
443 break;
444
445 case BPF_S_LDX_B_MSH:
446 /*
447 * x86 version drops packet (RET 0) when K<0, whereas
448 * interpreter does allow K<0 (__load_pointer, special
449 * ancillary data). common_load returns ENOTSUPP if K<0,
450 * so we fall back to interpreter & filter works.
451 */
452 func = sk_load_byte_msh;
453 goto common_load;
454 break;
455
456 /*** Jump and branches ***/
457 case BPF_S_JMP_JA:
458 if (K != 0)
459 PPC_JMP(addrs[i + 1 + K]);
460 break;
461
462 case BPF_S_JMP_JGT_K:
463 case BPF_S_JMP_JGT_X:
464 true_cond = COND_GT;
465 goto cond_branch;
466 case BPF_S_JMP_JGE_K:
467 case BPF_S_JMP_JGE_X:
468 true_cond = COND_GE;
469 goto cond_branch;
470 case BPF_S_JMP_JEQ_K:
471 case BPF_S_JMP_JEQ_X:
472 true_cond = COND_EQ;
473 goto cond_branch;
474 case BPF_S_JMP_JSET_K:
475 case BPF_S_JMP_JSET_X:
476 true_cond = COND_NE;
477 /* Fall through */
478 cond_branch:
479 /* same targets, can avoid doing the test :) */
480 if (filter[i].jt == filter[i].jf) {
481 if (filter[i].jt > 0)
482 PPC_JMP(addrs[i + 1 + filter[i].jt]);
483 break;
484 }
485
 /* Emit the (unsigned 32-bit) comparison for this branch kind */
486 switch (filter[i].code) {
487 case BPF_S_JMP_JGT_X:
488 case BPF_S_JMP_JGE_X:
489 case BPF_S_JMP_JEQ_X:
490 ctx->seen |= SEEN_XREG;
491 PPC_CMPLW(r_A, r_X);
492 break;
493 case BPF_S_JMP_JSET_X:
494 ctx->seen |= SEEN_XREG;
495 PPC_AND_DOT(r_scratch1, r_A, r_X);
496 break;
497 case BPF_S_JMP_JEQ_K:
498 case BPF_S_JMP_JGT_K:
499 case BPF_S_JMP_JGE_K:
500 if (K < 32768)
501 PPC_CMPLWI(r_A, K);
502 else {
503 PPC_LI32(r_scratch1, K);
504 PPC_CMPLW(r_A, r_scratch1);
505 }
506 break;
507 case BPF_S_JMP_JSET_K:
508 if (K < 32768)
509 /* PPC_ANDI is /only/ dot-form */
510 PPC_ANDI(r_scratch1, r_A, K);
511 else {
512 PPC_LI32(r_scratch1, K);
513 PPC_AND_DOT(r_scratch1, r_A,
514 r_scratch1);
515 }
516 break;
517 }
518 /* Sometimes branches are constructed "backward", with
519 * the false path being the branch and true path being
520 * a fallthrough to the next instruction.
521 */
522 if (filter[i].jt == 0)
523 /* Swap the sense of the branch */
524 PPC_BCC(true_cond ^ COND_CMP_TRUE,
525 addrs[i + 1 + filter[i].jf]);
526 else {
527 PPC_BCC(true_cond, addrs[i + 1 + filter[i].jt]);
528 if (filter[i].jf != 0)
529 PPC_JMP(addrs[i + 1 + filter[i].jf]);
530 }
531 break;
532 default:
533 /* The filter contains something cruel & unusual.
534 * We don't handle it, but also there shouldn't be
535 * anything missing from our list.
536 */
537 if (printk_ratelimit())
538 pr_err("BPF filter opcode %04x (@%d) unsupported\n",
539 filter[i].code, i);
540 return -ENOTSUPP;
541 }
542
543 }
544 /* Set end-of-body-code address for exit. */
545 addrs[i] = ctx->idx * 4;
546
547 return 0;
548}
549
/*
 * JIT-compile @fp.  On success, installs the generated function
 * descriptor (entry address + TOC) as fp->bpf_func; on any failure
 * the filter is simply left pointing at the interpreter.  See the
 * long comment below for the three-pass assembly scheme.
 */
550void bpf_jit_compile(struct sk_filter *fp)
551{
552 unsigned int proglen;
553 unsigned int alloclen;
554 u32 *image = NULL;
555 u32 *code_base;
556 unsigned int *addrs;
557 struct codegen_context cgctx;
558 int pass;
559 int flen = fp->len;
560
561 if (!bpf_jit_enable)
562 return;
563
 /* flen+1: addrs[flen] holds the start-of-epilogue offset */
564 addrs = kzalloc((flen+1) * sizeof(*addrs), GFP_KERNEL);
565 if (addrs == NULL)
566 return;
567
568 /*
569 * There are multiple assembly passes as the generated code will change
570 * size as it settles down, figuring out the max branch offsets/exit
571 * paths required.
572 *
573 * The range of standard conditional branches is +/- 32Kbytes. Since
574 * BPF_MAXINSNS = 4096, we can only jump from (worst case) start to
575 * finish with 8 bytes/instruction. Not feasible, so long jumps are
576 * used, distinct from short branches.
577 *
578 * Current:
579 *
580 * For now, both branch types assemble to 2 words (short branches padded
581 * with a NOP); this is less efficient, but assembly will always complete
582 * after exactly 3 passes:
583 *
584 * First pass: No code buffer; Program is "faux-generated" -- no code
585 * emitted but maximum size of output determined (and addrs[] filled
586 * in). Also, we note whether we use M[], whether we use skb data, etc.
587 * All generation choices assumed to be 'worst-case', e.g. branches all
588 * far (2 instructions), return path code reduction not available, etc.
589 *
590 * Second pass: Code buffer allocated with size determined previously.
591 * Prologue generated to support features we have seen used. Exit paths
592 * determined and addrs[] is filled in again, as code may be slightly
593 * smaller as a result.
594 *
595 * Third pass: Code generated 'for real', and branch destinations
596 * determined from now-accurate addrs[] map.
597 *
598 * Ideal:
599 *
600 * If we optimise this, near branches will be shorter. On the
601 * first assembly pass, we should err on the side of caution and
602 * generate the biggest code. On subsequent passes, branches will be
603 * generated short or long and code size will reduce. With smaller
604 * code, more branches may fall into the short category, and code will
605 * reduce more.
606 *
607 * Finally, if we see one pass generate code the same size as the
608 * previous pass we have converged and should now generate code for
609 * real. Allocating at the end will also save the memory that would
610 * otherwise be wasted by the (small) current code shrinkage.
611 * Preferably, we should do a small number of passes (e.g. 5) and if we
612 * haven't converged by then, get impatient and force code to generate
613 * as-is, even if the odd branch would be left long. The chances of a
614 * long jump are tiny with all but the most enormous of BPF filter
615 * inputs, so we should usually converge on the third pass.
616 */
617
618 cgctx.idx = 0;
619 cgctx.seen = 0;
620 cgctx.pc_ret0 = -1;
621 /* Scouting faux-generate pass 0 */
622 if (bpf_jit_build_body(fp, 0, &cgctx, addrs))
623 /* We hit something illegal or unsupported. */
624 goto out;
625
626 /*
627 * Pretend to build prologue, given the features we've seen. This will
628 * update ctgtx.idx as it pretends to output instructions, then we can
629 * calculate total size from idx.
630 */
631 bpf_jit_build_prologue(fp, 0, &cgctx);
632 bpf_jit_build_epilogue(0, &cgctx);
633
634 proglen = cgctx.idx * 4;
635 alloclen = proglen + FUNCTION_DESCR_SIZE;
 /*
  * Allocate at least a work_struct's worth: bpf_jit_free() reuses the
  * image memory as a work item to defer module_free() (see below).
  */
636 image = module_alloc(max_t(unsigned int, alloclen,
637 sizeof(struct work_struct)));
638 if (!image)
639 goto out;
640
641 code_base = image + (FUNCTION_DESCR_SIZE/4);
642
643 /* Code generation passes 1-2 */
644 for (pass = 1; pass < 3; pass++) {
645 /* Now build the prologue, body code & epilogue for real. */
646 cgctx.idx = 0;
647 bpf_jit_build_prologue(fp, code_base, &cgctx);
648 bpf_jit_build_body(fp, code_base, &cgctx, addrs);
649 bpf_jit_build_epilogue(code_base, &cgctx);
650
651 if (bpf_jit_enable > 1)
652 pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
653 proglen - (cgctx.idx * 4), cgctx.seen);
654 }
655
656 if (bpf_jit_enable > 1)
657 pr_info("flen=%d proglen=%u pass=%d image=%p\n",
658 flen, proglen, pass, image);
659
660 if (image) {
661 if (bpf_jit_enable > 1)
662 print_hex_dump(KERN_ERR, "JIT code: ",
663 DUMP_PREFIX_ADDRESS,
664 16, 1, code_base,
665 proglen, false);
666
667 bpf_flush_icache(code_base, code_base + (proglen/4));
668 /* Function descriptor nastiness: Address + TOC */
669 ((u64 *)image)[0] = (u64)code_base;
670 ((u64 *)image)[1] = local_paca->kernel_toc;
671 fp->bpf_func = (void *)image;
672 }
673out:
674 kfree(addrs);
675 return;
676}
677
/*
 * Deferred-free worker: @arg is the JIT image itself, which doubles
 * as the work_struct (see bpf_jit_free).  Runs in process context,
 * where module_free() is legal.
 */
678static void jit_free_defer(struct work_struct *arg)
679{
680 module_free(NULL, arg);
681}
682
683/* run from softirq, we must use a work_struct to call
684 * module_free() from process context
685 */
/*
 * Free a JITed filter.  Only acts when bpf_func actually points at
 * generated code (not the interpreter).  The image was allocated at
 * least sizeof(struct work_struct) bytes, so the image memory is
 * reused as the work item to push module_free() to process context.
 */
686void bpf_jit_free(struct sk_filter *fp)
687{
688 if (fp->bpf_func != sk_run_filter) {
689 struct work_struct *work = (struct work_struct *)fp->bpf_func;
690
691 INIT_WORK(work, jit_free_defer);
692 schedule_work(work);
693 }
694}
diff --git a/arch/powerpc/platforms/52xx/Kconfig b/arch/powerpc/platforms/52xx/Kconfig
index 47ea1be1481b..90f4496017e4 100644
--- a/arch/powerpc/platforms/52xx/Kconfig
+++ b/arch/powerpc/platforms/52xx/Kconfig
@@ -55,14 +55,6 @@ config PPC_MPC5200_BUGFIX
55 55
56 It is safe to say 'Y' here 56 It is safe to say 'Y' here
57 57
58config PPC_MPC5200_GPIO
59 bool "MPC5200 GPIO support"
60 depends on PPC_MPC52xx
61 select ARCH_REQUIRE_GPIOLIB
62 select GENERIC_GPIO
63 help
64 Enable gpiolib support for mpc5200 based boards
65
66config PPC_MPC5200_LPBFIFO 58config PPC_MPC5200_LPBFIFO
67 tristate "MPC5200 LocalPlus bus FIFO driver" 59 tristate "MPC5200 LocalPlus bus FIFO driver"
68 depends on PPC_MPC52xx 60 depends on PPC_MPC52xx
diff --git a/arch/powerpc/platforms/52xx/Makefile b/arch/powerpc/platforms/52xx/Makefile
index 2bc8cd0c5cfc..4e62486791e9 100644
--- a/arch/powerpc/platforms/52xx/Makefile
+++ b/arch/powerpc/platforms/52xx/Makefile
@@ -14,5 +14,4 @@ ifeq ($(CONFIG_PPC_LITE5200),y)
14 obj-$(CONFIG_PM) += lite5200_sleep.o lite5200_pm.o 14 obj-$(CONFIG_PM) += lite5200_sleep.o lite5200_pm.o
15endif 15endif
16 16
17obj-$(CONFIG_PPC_MPC5200_GPIO) += mpc52xx_gpio.o
18obj-$(CONFIG_PPC_MPC5200_LPBFIFO) += mpc52xx_lpbfifo.o 17obj-$(CONFIG_PPC_MPC5200_LPBFIFO) += mpc52xx_lpbfifo.o
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpio.c b/arch/powerpc/platforms/52xx/mpc52xx_gpio.c
deleted file mode 100644
index 1757d1db4b51..000000000000
--- a/arch/powerpc/platforms/52xx/mpc52xx_gpio.c
+++ /dev/null
@@ -1,380 +0,0 @@
1/*
2 * MPC52xx gpio driver
3 *
4 * Copyright (c) 2008 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#include <linux/of.h>
21#include <linux/kernel.h>
22#include <linux/slab.h>
23#include <linux/of_gpio.h>
24#include <linux/io.h>
25#include <linux/of_platform.h>
26
27#include <asm/gpio.h>
28#include <asm/mpc52xx.h>
29#include <sysdev/fsl_soc.h>
30
31static DEFINE_SPINLOCK(gpio_lock);
32
/*
 * Per-bank driver state: the memory-mapped gpiochip plus software
 * shadows of the dvo/gpioe/ddr registers, kept so read-modify-write
 * updates need not read the hardware back (updates are serialized by
 * gpio_lock).
 */
33struct mpc52xx_gpiochip {
34 struct of_mm_gpio_chip mmchip;
35 unsigned int shadow_dvo;
36 unsigned int shadow_gpioe;
37 unsigned int shadow_ddr;
38};
39
40/*
41 * GPIO LIB API implementation for wakeup GPIOs.
42 *
43 * There's a maximum of 8 wakeup GPIOs. Which of these are available
44 * for use depends on your board setup.
45 *
46 * 0 -> GPIO_WKUP_7
47 * 1 -> GPIO_WKUP_6
48 * 2 -> PSC6_1
49 * 3 -> PSC6_0
50 * 4 -> ETH_17
51 * 5 -> PSC3_9
52 * 6 -> PSC2_4
53 * 7 -> PSC1_4
54 *
55 */
56static int mpc52xx_wkup_gpio_get(struct gpio_chip *gc, unsigned int gpio)
57{
58 struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
59 struct mpc52xx_gpio_wkup __iomem *regs = mm_gc->regs;
60 unsigned int ret;
61
62 ret = (in_8(&regs->wkup_ival) >> (7 - gpio)) & 1;
63
64 pr_debug("%s: gpio: %d ret: %d\n", __func__, gpio, ret);
65
66 return ret;
67}
68
69static inline void
70__mpc52xx_wkup_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
71{
72 struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
73 struct mpc52xx_gpiochip *chip = container_of(mm_gc,
74 struct mpc52xx_gpiochip, mmchip);
75 struct mpc52xx_gpio_wkup __iomem *regs = mm_gc->regs;
76
77 if (val)
78 chip->shadow_dvo |= 1 << (7 - gpio);
79 else
80 chip->shadow_dvo &= ~(1 << (7 - gpio));
81
82 out_8(&regs->wkup_dvo, chip->shadow_dvo);
83}
84
85static void
86mpc52xx_wkup_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
87{
88 unsigned long flags;
89
90 spin_lock_irqsave(&gpio_lock, flags);
91
92 __mpc52xx_wkup_gpio_set(gc, gpio, val);
93
94 spin_unlock_irqrestore(&gpio_lock, flags);
95
96 pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val);
97}
98
99static int mpc52xx_wkup_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
100{
101 struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
102 struct mpc52xx_gpiochip *chip = container_of(mm_gc,
103 struct mpc52xx_gpiochip, mmchip);
104 struct mpc52xx_gpio_wkup __iomem *regs = mm_gc->regs;
105 unsigned long flags;
106
107 spin_lock_irqsave(&gpio_lock, flags);
108
109 /* set the direction */
110 chip->shadow_ddr &= ~(1 << (7 - gpio));
111 out_8(&regs->wkup_ddr, chip->shadow_ddr);
112
113 /* and enable the pin */
114 chip->shadow_gpioe |= 1 << (7 - gpio);
115 out_8(&regs->wkup_gpioe, chip->shadow_gpioe);
116
117 spin_unlock_irqrestore(&gpio_lock, flags);
118
119 return 0;
120}
121
122static int
123mpc52xx_wkup_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
124{
125 struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
126 struct mpc52xx_gpio_wkup __iomem *regs = mm_gc->regs;
127 struct mpc52xx_gpiochip *chip = container_of(mm_gc,
128 struct mpc52xx_gpiochip, mmchip);
129 unsigned long flags;
130
131 spin_lock_irqsave(&gpio_lock, flags);
132
133 __mpc52xx_wkup_gpio_set(gc, gpio, val);
134
135 /* Then set direction */
136 chip->shadow_ddr |= 1 << (7 - gpio);
137 out_8(&regs->wkup_ddr, chip->shadow_ddr);
138
139 /* Finally enable the pin */
140 chip->shadow_gpioe |= 1 << (7 - gpio);
141 out_8(&regs->wkup_gpioe, chip->shadow_gpioe);
142
143 spin_unlock_irqrestore(&gpio_lock, flags);
144
145 pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val);
146
147 return 0;
148}
149
150static int __devinit mpc52xx_wkup_gpiochip_probe(struct platform_device *ofdev)
151{
152 struct mpc52xx_gpiochip *chip;
153 struct mpc52xx_gpio_wkup __iomem *regs;
154 struct gpio_chip *gc;
155 int ret;
156
157 chip = kzalloc(sizeof(*chip), GFP_KERNEL);
158 if (!chip)
159 return -ENOMEM;
160
161 gc = &chip->mmchip.gc;
162
163 gc->ngpio = 8;
164 gc->direction_input = mpc52xx_wkup_gpio_dir_in;
165 gc->direction_output = mpc52xx_wkup_gpio_dir_out;
166 gc->get = mpc52xx_wkup_gpio_get;
167 gc->set = mpc52xx_wkup_gpio_set;
168
169 ret = of_mm_gpiochip_add(ofdev->dev.of_node, &chip->mmchip);
170 if (ret)
171 return ret;
172
173 regs = chip->mmchip.regs;
174 chip->shadow_gpioe = in_8(&regs->wkup_gpioe);
175 chip->shadow_ddr = in_8(&regs->wkup_ddr);
176 chip->shadow_dvo = in_8(&regs->wkup_dvo);
177
178 return 0;
179}
180
/*
 * gpiochips cannot currently be unregistered, so device removal is
 * refused outright.
 */
181static int mpc52xx_gpiochip_remove(struct platform_device *ofdev)
182{
183 return -EBUSY;
184}
185
/* OF match table for the wakeup GPIO bank. */
186static const struct of_device_id mpc52xx_wkup_gpiochip_match[] = {
187 {
188 .compatible = "fsl,mpc5200-gpio-wkup",
189 },
190 {}
191};
192
/* Platform driver for the 8-line wakeup GPIO bank. */
193static struct platform_driver mpc52xx_wkup_gpiochip_driver = {
194 .driver = {
195 .name = "gpio_wkup",
196 .owner = THIS_MODULE,
197 .of_match_table = mpc52xx_wkup_gpiochip_match,
198 },
199 .probe = mpc52xx_wkup_gpiochip_probe,
200 .remove = mpc52xx_gpiochip_remove,
201};
202
203/*
204 * GPIO LIB API implementation for simple GPIOs
205 *
206 * There's a maximum of 32 simple GPIOs. Which of these are available
207 * for use depends on your board setup.
208 * The numbering reflects the bit numbering in the port registers:
209 *
210 * 0..1 > reserved
211 * 2..3 > IRDA
212 * 4..7 > ETHR
213 * 8..11 > reserved
214 * 12..15 > USB
215 * 16..17 > reserved
216 * 18..23 > PSC3
217 * 24..27 > PSC2
218 * 28..31 > PSC1
219 */
220static int mpc52xx_simple_gpio_get(struct gpio_chip *gc, unsigned int gpio)
221{
222 struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
223 struct mpc52xx_gpio __iomem *regs = mm_gc->regs;
224 unsigned int ret;
225
226 ret = (in_be32(&regs->simple_ival) >> (31 - gpio)) & 1;
227
228 return ret;
229}
230
231static inline void
232__mpc52xx_simple_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
233{
234 struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
235 struct mpc52xx_gpiochip *chip = container_of(mm_gc,
236 struct mpc52xx_gpiochip, mmchip);
237 struct mpc52xx_gpio __iomem *regs = mm_gc->regs;
238
239 if (val)
240 chip->shadow_dvo |= 1 << (31 - gpio);
241 else
242 chip->shadow_dvo &= ~(1 << (31 - gpio));
243 out_be32(&regs->simple_dvo, chip->shadow_dvo);
244}
245
246static void
247mpc52xx_simple_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
248{
249 unsigned long flags;
250
251 spin_lock_irqsave(&gpio_lock, flags);
252
253 __mpc52xx_simple_gpio_set(gc, gpio, val);
254
255 spin_unlock_irqrestore(&gpio_lock, flags);
256
257 pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val);
258}
259
260static int mpc52xx_simple_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
261{
262 struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
263 struct mpc52xx_gpiochip *chip = container_of(mm_gc,
264 struct mpc52xx_gpiochip, mmchip);
265 struct mpc52xx_gpio __iomem *regs = mm_gc->regs;
266 unsigned long flags;
267
268 spin_lock_irqsave(&gpio_lock, flags);
269
270 /* set the direction */
271 chip->shadow_ddr &= ~(1 << (31 - gpio));
272 out_be32(&regs->simple_ddr, chip->shadow_ddr);
273
274 /* and enable the pin */
275 chip->shadow_gpioe |= 1 << (31 - gpio);
276 out_be32(&regs->simple_gpioe, chip->shadow_gpioe);
277
278 spin_unlock_irqrestore(&gpio_lock, flags);
279
280 return 0;
281}
282
283static int
284mpc52xx_simple_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
285{
286 struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
287 struct mpc52xx_gpiochip *chip = container_of(mm_gc,
288 struct mpc52xx_gpiochip, mmchip);
289 struct mpc52xx_gpio __iomem *regs = mm_gc->regs;
290 unsigned long flags;
291
292 spin_lock_irqsave(&gpio_lock, flags);
293
294 /* First set initial value */
295 __mpc52xx_simple_gpio_set(gc, gpio, val);
296
297 /* Then set direction */
298 chip->shadow_ddr |= 1 << (31 - gpio);
299 out_be32(&regs->simple_ddr, chip->shadow_ddr);
300
301 /* Finally enable the pin */
302 chip->shadow_gpioe |= 1 << (31 - gpio);
303 out_be32(&regs->simple_gpioe, chip->shadow_gpioe);
304
305 spin_unlock_irqrestore(&gpio_lock, flags);
306
307 pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val);
308
309 return 0;
310}
311
312static int __devinit mpc52xx_simple_gpiochip_probe(struct platform_device *ofdev)
313{
314 struct mpc52xx_gpiochip *chip;
315 struct gpio_chip *gc;
316 struct mpc52xx_gpio __iomem *regs;
317 int ret;
318
319 chip = kzalloc(sizeof(*chip), GFP_KERNEL);
320 if (!chip)
321 return -ENOMEM;
322
323 gc = &chip->mmchip.gc;
324
325 gc->ngpio = 32;
326 gc->direction_input = mpc52xx_simple_gpio_dir_in;
327 gc->direction_output = mpc52xx_simple_gpio_dir_out;
328 gc->get = mpc52xx_simple_gpio_get;
329 gc->set = mpc52xx_simple_gpio_set;
330
331 ret = of_mm_gpiochip_add(ofdev->dev.of_node, &chip->mmchip);
332 if (ret)
333 return ret;
334
335 regs = chip->mmchip.regs;
336 chip->shadow_gpioe = in_be32(&regs->simple_gpioe);
337 chip->shadow_ddr = in_be32(&regs->simple_ddr);
338 chip->shadow_dvo = in_be32(&regs->simple_dvo);
339
340 return 0;
341}
342
/* OF match table for the simple GPIO bank. */
343static const struct of_device_id mpc52xx_simple_gpiochip_match[] = {
344 {
345 .compatible = "fsl,mpc5200-gpio",
346 },
347 {}
348};
349
/* Platform driver for the 32-line simple GPIO bank. */
350static struct platform_driver mpc52xx_simple_gpiochip_driver = {
351 .driver = {
352 .name = "gpio",
353 .owner = THIS_MODULE,
354 .of_match_table = mpc52xx_simple_gpiochip_match,
355 },
356 .probe = mpc52xx_simple_gpiochip_probe,
357 .remove = mpc52xx_gpiochip_remove,
358};
359
360static int __init mpc52xx_gpio_init(void)
361{
362 if (platform_driver_register(&mpc52xx_wkup_gpiochip_driver))
363 printk(KERN_ERR "Unable to register wakeup GPIO driver\n");
364
365 if (platform_driver_register(&mpc52xx_simple_gpiochip_driver))
366 printk(KERN_ERR "Unable to register simple GPIO driver\n");
367
368 return 0;
369}
370
371
372/* Make sure we get initialised before anyone else tries to use us */
373subsys_initcall(mpc52xx_gpio_init);
374
375/* No exit call at the moment as we cannot unregister of gpio chips */
376
377MODULE_DESCRIPTION("Freescale MPC52xx gpio driver");
378MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de");
379MODULE_LICENSE("GPL v2");
380
diff --git a/arch/powerpc/platforms/amigaone/Kconfig b/arch/powerpc/platforms/amigaone/Kconfig
index 022476717718..128de25cc284 100644
--- a/arch/powerpc/platforms/amigaone/Kconfig
+++ b/arch/powerpc/platforms/amigaone/Kconfig
@@ -8,7 +8,7 @@ config AMIGAONE
8 select NOT_COHERENT_CACHE 8 select NOT_COHERENT_CACHE
9 select CHECK_CACHE_COHERENCY 9 select CHECK_CACHE_COHERENCY
10 select DEFAULT_UIMAGE 10 select DEFAULT_UIMAGE
11 select PCSPKR_PLATFORM 11 select HAVE_PCSPKR_PLATFORM
12 help 12 help
13 Select AmigaOne for the following machines: 13 Select AmigaOne for the following machines:
14 - AmigaOne SE/Teron CX (G3 only) 14 - AmigaOne SE/Teron CX (G3 only)
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 3c7c3f82d842..fb59c46e9e9e 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -1850,9 +1850,16 @@ out:
1850 return ret; 1850 return ret;
1851} 1851}
1852 1852
1853static int spufs_mfc_fsync(struct file *file, int datasync) 1853static int spufs_mfc_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1854{ 1854{
1855 return spufs_mfc_flush(file, NULL); 1855 struct inode *inode = file->f_path.dentry->d_inode;
1856 int err = filemap_write_and_wait_range(inode->i_mapping, start, end);
1857 if (!err) {
1858 mutex_lock(&inode->i_mutex);
1859 err = spufs_mfc_flush(file, NULL);
1860 mutex_unlock(&inode->i_mutex);
1861 }
1862 return err;
1856} 1863}
1857 1864
1858static int spufs_mfc_fasync(int fd, struct file *file, int on) 1865static int spufs_mfc_fasync(int fd, struct file *file, int on)
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 856e9c398068..e481f6b9a789 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -611,15 +611,14 @@ out:
611 611
612static struct file_system_type spufs_type; 612static struct file_system_type spufs_type;
613 613
614long spufs_create(struct nameidata *nd, unsigned int flags, mode_t mode, 614long spufs_create(struct path *path, struct dentry *dentry,
615 struct file *filp) 615 unsigned int flags, mode_t mode, struct file *filp)
616{ 616{
617 struct dentry *dentry;
618 int ret; 617 int ret;
619 618
620 ret = -EINVAL; 619 ret = -EINVAL;
621 /* check if we are on spufs */ 620 /* check if we are on spufs */
622 if (nd->path.dentry->d_sb->s_type != &spufs_type) 621 if (path->dentry->d_sb->s_type != &spufs_type)
623 goto out; 622 goto out;
624 623
625 /* don't accept undefined flags */ 624 /* don't accept undefined flags */
@@ -627,33 +626,27 @@ long spufs_create(struct nameidata *nd, unsigned int flags, mode_t mode,
627 goto out; 626 goto out;
628 627
629 /* only threads can be underneath a gang */ 628 /* only threads can be underneath a gang */
630 if (nd->path.dentry != nd->path.dentry->d_sb->s_root) { 629 if (path->dentry != path->dentry->d_sb->s_root) {
631 if ((flags & SPU_CREATE_GANG) || 630 if ((flags & SPU_CREATE_GANG) ||
632 !SPUFS_I(nd->path.dentry->d_inode)->i_gang) 631 !SPUFS_I(path->dentry->d_inode)->i_gang)
633 goto out; 632 goto out;
634 } 633 }
635 634
636 dentry = lookup_create(nd, 1);
637 ret = PTR_ERR(dentry);
638 if (IS_ERR(dentry))
639 goto out_dir;
640
641 mode &= ~current_umask(); 635 mode &= ~current_umask();
642 636
643 if (flags & SPU_CREATE_GANG) 637 if (flags & SPU_CREATE_GANG)
644 ret = spufs_create_gang(nd->path.dentry->d_inode, 638 ret = spufs_create_gang(path->dentry->d_inode,
645 dentry, nd->path.mnt, mode); 639 dentry, path->mnt, mode);
646 else 640 else
647 ret = spufs_create_context(nd->path.dentry->d_inode, 641 ret = spufs_create_context(path->dentry->d_inode,
648 dentry, nd->path.mnt, flags, mode, 642 dentry, path->mnt, flags, mode,
649 filp); 643 filp);
650 if (ret >= 0) 644 if (ret >= 0)
651 fsnotify_mkdir(nd->path.dentry->d_inode, dentry); 645 fsnotify_mkdir(path->dentry->d_inode, dentry);
652 return ret; 646 return ret;
653 647
654out_dir:
655 mutex_unlock(&nd->path.dentry->d_inode->i_mutex);
656out: 648out:
649 mutex_unlock(&path->dentry->d_inode->i_mutex);
657 return ret; 650 return ret;
658} 651}
659 652
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index c448bac65518..099245f230b2 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -248,7 +248,7 @@ extern const struct spufs_tree_descr spufs_dir_debug_contents[];
248/* system call implementation */ 248/* system call implementation */
249extern struct spufs_calls spufs_calls; 249extern struct spufs_calls spufs_calls;
250long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *status); 250long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *status);
251long spufs_create(struct nameidata *nd, unsigned int flags, 251long spufs_create(struct path *nd, struct dentry *dentry, unsigned int flags,
252 mode_t mode, struct file *filp); 252 mode_t mode, struct file *filp);
253/* ELF coredump callbacks for writing SPU ELF notes */ 253/* ELF coredump callbacks for writing SPU ELF notes */
254extern int spufs_coredump_extra_notes_size(void); 254extern int spufs_coredump_extra_notes_size(void);
diff --git a/arch/powerpc/platforms/cell/spufs/syscalls.c b/arch/powerpc/platforms/cell/spufs/syscalls.c
index a3d2ce54ea2e..609e016e92d0 100644
--- a/arch/powerpc/platforms/cell/spufs/syscalls.c
+++ b/arch/powerpc/platforms/cell/spufs/syscalls.c
@@ -62,21 +62,17 @@ out:
62static long do_spu_create(const char __user *pathname, unsigned int flags, 62static long do_spu_create(const char __user *pathname, unsigned int flags,
63 mode_t mode, struct file *neighbor) 63 mode_t mode, struct file *neighbor)
64{ 64{
65 char *tmp; 65 struct path path;
66 struct dentry *dentry;
66 int ret; 67 int ret;
67 68
68 tmp = getname(pathname); 69 dentry = user_path_create(AT_FDCWD, pathname, &path, 1);
69 ret = PTR_ERR(tmp); 70 ret = PTR_ERR(dentry);
70 if (!IS_ERR(tmp)) { 71 if (!IS_ERR(dentry)) {
71 struct nameidata nd; 72 ret = spufs_create(&path, dentry, flags, mode, neighbor);
72 73 mutex_unlock(&path.dentry->d_inode->i_mutex);
73 ret = kern_path_parent(tmp, &nd); 74 dput(dentry);
74 if (!ret) { 75 path_put(&path);
75 nd.flags |= LOOKUP_OPEN | LOOKUP_CREATE;
76 ret = spufs_create(&nd, flags, mode, neighbor);
77 path_put(&nd.path);
78 }
79 putname(tmp);
80 } 76 }
81 77
82 return ret; 78 return ret;
diff --git a/arch/powerpc/platforms/chrp/Kconfig b/arch/powerpc/platforms/chrp/Kconfig
index bc0b0efdc5fe..d3cdab582c5d 100644
--- a/arch/powerpc/platforms/chrp/Kconfig
+++ b/arch/powerpc/platforms/chrp/Kconfig
@@ -1,6 +1,7 @@
1config PPC_CHRP 1config PPC_CHRP
2 bool "Common Hardware Reference Platform (CHRP) based machines" 2 bool "Common Hardware Reference Platform (CHRP) based machines"
3 depends on 6xx 3 depends on 6xx
4 select HAVE_PCSPKR_PLATFORM
4 select MPIC 5 select MPIC
5 select PPC_I8259 6 select PPC_I8259
6 select PPC_INDIRECT_PCI 7 select PPC_INDIRECT_PCI
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
index f33e08d573ce..abe8d7e2ebeb 100644
--- a/arch/powerpc/platforms/powermac/pci.c
+++ b/arch/powerpc/platforms/powermac/pci.c
@@ -17,6 +17,7 @@
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/bootmem.h> 18#include <linux/bootmem.h>
19#include <linux/irq.h> 19#include <linux/irq.h>
20#include <linux/of_pci.h>
20 21
21#include <asm/sections.h> 22#include <asm/sections.h>
22#include <asm/io.h> 23#include <asm/io.h>
@@ -235,7 +236,7 @@ static int chaos_validate_dev(struct pci_bus *bus, int devfn, int offset)
235 236
236 if (offset >= 0x100) 237 if (offset >= 0x100)
237 return PCIBIOS_BAD_REGISTER_NUMBER; 238 return PCIBIOS_BAD_REGISTER_NUMBER;
238 np = pci_busdev_to_OF_node(bus, devfn); 239 np = of_pci_find_child_device(bus->dev.of_node, devfn);
239 if (np == NULL) 240 if (np == NULL)
240 return PCIBIOS_DEVICE_NOT_FOUND; 241 return PCIBIOS_DEVICE_NOT_FOUND;
241 242
diff --git a/arch/powerpc/platforms/prep/Kconfig b/arch/powerpc/platforms/prep/Kconfig
index bf8330ef2e76..f0536c7cda9f 100644
--- a/arch/powerpc/platforms/prep/Kconfig
+++ b/arch/powerpc/platforms/prep/Kconfig
@@ -1,6 +1,7 @@
1config PPC_PREP 1config PPC_PREP
2 bool "PowerPC Reference Platform (PReP) based machines" 2 bool "PowerPC Reference Platform (PReP) based machines"
3 depends on 6xx && BROKEN 3 depends on 6xx && BROKEN
4 select HAVE_PCSPKR_PLATFORM
4 select MPIC 5 select MPIC
5 select PPC_I8259 6 select PPC_I8259
6 select PPC_INDIRECT_PCI 7 select PPC_INDIRECT_PCI
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 71af4c5d6c05..05cf4769b88c 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -1,6 +1,7 @@
1config PPC_PSERIES 1config PPC_PSERIES
2 depends on PPC64 && PPC_BOOK3S 2 depends on PPC64 && PPC_BOOK3S
3 bool "IBM pSeries & new (POWER5-based) iSeries" 3 bool "IBM pSeries & new (POWER5-based) iSeries"
4 select HAVE_PCSPKR_PLATFORM
4 select MPIC 5 select MPIC
5 select PCI_MSI 6 select PCI_MSI
6 select PPC_XICS 7 select PPC_XICS
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index 33867ec4a234..9d6a8effeda2 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -12,6 +12,8 @@
12#include <linux/of.h> 12#include <linux/of.h>
13#include <linux/memblock.h> 13#include <linux/memblock.h>
14#include <linux/vmalloc.h> 14#include <linux/vmalloc.h>
15#include <linux/memory.h>
16
15#include <asm/firmware.h> 17#include <asm/firmware.h>
16#include <asm/machdep.h> 18#include <asm/machdep.h>
17#include <asm/pSeries_reconfig.h> 19#include <asm/pSeries_reconfig.h>
@@ -20,24 +22,25 @@
20static unsigned long get_memblock_size(void) 22static unsigned long get_memblock_size(void)
21{ 23{
22 struct device_node *np; 24 struct device_node *np;
23 unsigned int memblock_size = 0; 25 unsigned int memblock_size = MIN_MEMORY_BLOCK_SIZE;
26 struct resource r;
24 27
25 np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); 28 np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
26 if (np) { 29 if (np) {
27 const unsigned long *size; 30 const __be64 *size;
28 31
29 size = of_get_property(np, "ibm,lmb-size", NULL); 32 size = of_get_property(np, "ibm,lmb-size", NULL);
30 memblock_size = size ? *size : 0; 33 if (size)
31 34 memblock_size = be64_to_cpup(size);
32 of_node_put(np); 35 of_node_put(np);
33 } else { 36 } else if (machine_is(pseries)) {
37 /* This fallback really only applies to pseries */
34 unsigned int memzero_size = 0; 38 unsigned int memzero_size = 0;
35 const unsigned int *regs;
36 39
37 np = of_find_node_by_path("/memory@0"); 40 np = of_find_node_by_path("/memory@0");
38 if (np) { 41 if (np) {
39 regs = of_get_property(np, "reg", NULL); 42 if (!of_address_to_resource(np, 0, &r))
40 memzero_size = regs ? regs[3] : 0; 43 memzero_size = resource_size(&r);
41 of_node_put(np); 44 of_node_put(np);
42 } 45 }
43 46
@@ -50,16 +53,21 @@ static unsigned long get_memblock_size(void)
50 sprintf(buf, "/memory@%x", memzero_size); 53 sprintf(buf, "/memory@%x", memzero_size);
51 np = of_find_node_by_path(buf); 54 np = of_find_node_by_path(buf);
52 if (np) { 55 if (np) {
53 regs = of_get_property(np, "reg", NULL); 56 if (!of_address_to_resource(np, 0, &r))
54 memblock_size = regs ? regs[3] : 0; 57 memblock_size = resource_size(&r);
55 of_node_put(np); 58 of_node_put(np);
56 } 59 }
57 } 60 }
58 } 61 }
59
60 return memblock_size; 62 return memblock_size;
61} 63}
62 64
65/* WARNING: This is going to override the generic definition whenever
66 * pseries is built-in regardless of what platform is active at boot
67 * time. This is fine for now as this is the only "option" and it
68 * should work everywhere. If not, we'll have to turn this into a
69 * ppc_md. callback
70 */
63unsigned long memory_block_size_bytes(void) 71unsigned long memory_block_size_bytes(void)
64{ 72{
65 return get_memblock_size(); 73 return get_memblock_size();
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
index 5ed8d64fc2ed..0317a3547cb9 100644
--- a/arch/s390/crypto/sha256_s390.c
+++ b/arch/s390/crypto/sha256_s390.c
@@ -1,15 +1,12 @@
1/* 1/*
2 * Cryptographic API. 2 * Cryptographic API.
3 * 3 *
4 * s390 implementation of the SHA256 Secure Hash Algorithm. 4 * s390 implementation of the SHA256 and SHA224 Secure Hash Algorithm.
5 * 5 *
6 * s390 Version: 6 * s390 Version:
7 * Copyright IBM Corp. 2005,2007 7 * Copyright IBM Corp. 2005,2011
8 * Author(s): Jan Glauber (jang@de.ibm.com) 8 * Author(s): Jan Glauber (jang@de.ibm.com)
9 * 9 *
10 * Derived from "crypto/sha256_generic.c"
11 * and "arch/s390/crypto/sha1_s390.c"
12 *
13 * This program is free software; you can redistribute it and/or modify it 10 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the Free 11 * under the terms of the GNU General Public License as published by the Free
15 * Software Foundation; either version 2 of the License, or (at your option) 12 * Software Foundation; either version 2 of the License, or (at your option)
@@ -65,7 +62,7 @@ static int sha256_import(struct shash_desc *desc, const void *in)
65 return 0; 62 return 0;
66} 63}
67 64
68static struct shash_alg alg = { 65static struct shash_alg sha256_alg = {
69 .digestsize = SHA256_DIGEST_SIZE, 66 .digestsize = SHA256_DIGEST_SIZE,
70 .init = sha256_init, 67 .init = sha256_init,
71 .update = s390_sha_update, 68 .update = s390_sha_update,
@@ -84,22 +81,69 @@ static struct shash_alg alg = {
84 } 81 }
85}; 82};
86 83
87static int sha256_s390_init(void) 84static int sha224_init(struct shash_desc *desc)
88{ 85{
86 struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
87
88 sctx->state[0] = SHA224_H0;
89 sctx->state[1] = SHA224_H1;
90 sctx->state[2] = SHA224_H2;
91 sctx->state[3] = SHA224_H3;
92 sctx->state[4] = SHA224_H4;
93 sctx->state[5] = SHA224_H5;
94 sctx->state[6] = SHA224_H6;
95 sctx->state[7] = SHA224_H7;
96 sctx->count = 0;
97 sctx->func = KIMD_SHA_256;
98
99 return 0;
100}
101
102static struct shash_alg sha224_alg = {
103 .digestsize = SHA224_DIGEST_SIZE,
104 .init = sha224_init,
105 .update = s390_sha_update,
106 .final = s390_sha_final,
107 .export = sha256_export,
108 .import = sha256_import,
109 .descsize = sizeof(struct s390_sha_ctx),
110 .statesize = sizeof(struct sha256_state),
111 .base = {
112 .cra_name = "sha224",
113 .cra_driver_name= "sha224-s390",
114 .cra_priority = CRYPT_S390_PRIORITY,
115 .cra_flags = CRYPTO_ALG_TYPE_SHASH,
116 .cra_blocksize = SHA224_BLOCK_SIZE,
117 .cra_module = THIS_MODULE,
118 }
119};
120
121static int __init sha256_s390_init(void)
122{
123 int ret;
124
89 if (!crypt_s390_func_available(KIMD_SHA_256, CRYPT_S390_MSA)) 125 if (!crypt_s390_func_available(KIMD_SHA_256, CRYPT_S390_MSA))
90 return -EOPNOTSUPP; 126 return -EOPNOTSUPP;
91 127 ret = crypto_register_shash(&sha256_alg);
92 return crypto_register_shash(&alg); 128 if (ret < 0)
129 goto out;
130 ret = crypto_register_shash(&sha224_alg);
131 if (ret < 0)
132 crypto_unregister_shash(&sha256_alg);
133out:
134 return ret;
93} 135}
94 136
95static void __exit sha256_s390_fini(void) 137static void __exit sha256_s390_fini(void)
96{ 138{
97 crypto_unregister_shash(&alg); 139 crypto_unregister_shash(&sha224_alg);
140 crypto_unregister_shash(&sha256_alg);
98} 141}
99 142
100module_init(sha256_s390_init); 143module_init(sha256_s390_init);
101module_exit(sha256_s390_fini); 144module_exit(sha256_s390_fini);
102 145
103MODULE_ALIAS("sha256"); 146MODULE_ALIAS("sha256");
147MODULE_ALIAS("sha224");
104MODULE_LICENSE("GPL"); 148MODULE_LICENSE("GPL");
105MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm"); 149MODULE_DESCRIPTION("SHA256 and SHA224 Secure Hash Algorithm");
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index a65d2e82f61d..a63d34c3611e 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -331,7 +331,7 @@ void __kprobes do_per_trap(struct pt_regs *regs)
331{ 331{
332 if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, SIGTRAP) == NOTIFY_STOP) 332 if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, SIGTRAP) == NOTIFY_STOP)
333 return; 333 return;
334 if (tracehook_consider_fatal_signal(current, SIGTRAP)) 334 if (current->ptrace)
335 force_sig(SIGTRAP, current); 335 force_sig(SIGTRAP, current);
336} 336}
337 337
@@ -425,7 +425,7 @@ static void __kprobes illegal_op(struct pt_regs *regs, long pgm_int_code,
425 if (get_user(*((__u16 *) opcode), (__u16 __user *) location)) 425 if (get_user(*((__u16 *) opcode), (__u16 __user *) location))
426 return; 426 return;
427 if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) { 427 if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) {
428 if (tracehook_consider_fatal_signal(current, SIGTRAP)) 428 if (current->ptrace)
429 force_sig(SIGTRAP, current); 429 force_sig(SIGTRAP, current);
430 else 430 else
431 signal = SIGILL; 431 signal = SIGILL;
diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig
index f66a1bdbb61d..a21634173a66 100644
--- a/arch/s390/kvm/Kconfig
+++ b/arch/s390/kvm/Kconfig
@@ -37,6 +37,5 @@ config KVM
37# OK, it's a little counter-intuitive to do this, but it puts it neatly under 37# OK, it's a little counter-intuitive to do this, but it puts it neatly under
38# the virtualization menu. 38# the virtualization menu.
39source drivers/vhost/Kconfig 39source drivers/vhost/Kconfig
40source drivers/virtio/Kconfig
41 40
42endif # VIRTUALIZATION 41endif # VIRTUALIZATION
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index fe103e891e7a..095f782a5512 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -299,7 +299,7 @@ static inline int do_exception(struct pt_regs *regs, int access,
299 goto out; 299 goto out;
300 300
301 address = trans_exc_code & __FAIL_ADDR_MASK; 301 address = trans_exc_code & __FAIL_ADDR_MASK;
302 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); 302 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
303 flags = FAULT_FLAG_ALLOW_RETRY; 303 flags = FAULT_FLAG_ALLOW_RETRY;
304 if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400) 304 if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
305 flags |= FAULT_FLAG_WRITE; 305 flags |= FAULT_FLAG_WRITE;
@@ -345,11 +345,11 @@ retry:
345 if (flags & FAULT_FLAG_ALLOW_RETRY) { 345 if (flags & FAULT_FLAG_ALLOW_RETRY) {
346 if (fault & VM_FAULT_MAJOR) { 346 if (fault & VM_FAULT_MAJOR) {
347 tsk->maj_flt++; 347 tsk->maj_flt++;
348 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, 348 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
349 regs, address); 349 regs, address);
350 } else { 350 } else {
351 tsk->min_flt++; 351 tsk->min_flt++;
352 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, 352 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
353 regs, address); 353 regs, address);
354 } 354 }
355 if (fault & VM_FAULT_RETRY) { 355 if (fault & VM_FAULT_RETRY) {
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index bbdeb48bbf8e..748ff1920068 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -897,20 +897,4 @@ source "security/Kconfig"
897 897
898source "crypto/Kconfig" 898source "crypto/Kconfig"
899 899
900menuconfig VIRTUALIZATION
901 bool "Virtualization"
902 default n
903 ---help---
904 Say Y here to get to see options for using your Linux host to run other
905 operating systems inside virtual machines (guests).
906 This option alone does not add any kernel code.
907
908 If you say N, all options in this submenu will be skipped and disabled.
909
910if VIRTUALIZATION
911
912source drivers/virtio/Kconfig
913
914endif # VIRTUALIZATION
915
916source "lib/Kconfig" 900source "lib/Kconfig"
diff --git a/arch/sh/kernel/cpu/sh4/perf_event.c b/arch/sh/kernel/cpu/sh4/perf_event.c
index 748955df018d..fa4f724b295a 100644
--- a/arch/sh/kernel/cpu/sh4/perf_event.c
+++ b/arch/sh/kernel/cpu/sh4/perf_event.c
@@ -180,6 +180,21 @@ static const int sh7750_cache_events
180 [ C(RESULT_MISS) ] = -1, 180 [ C(RESULT_MISS) ] = -1,
181 }, 181 },
182 }, 182 },
183
184 [ C(NODE) ] = {
185 [ C(OP_READ) ] = {
186 [ C(RESULT_ACCESS) ] = -1,
187 [ C(RESULT_MISS) ] = -1,
188 },
189 [ C(OP_WRITE) ] = {
190 [ C(RESULT_ACCESS) ] = -1,
191 [ C(RESULT_MISS) ] = -1,
192 },
193 [ C(OP_PREFETCH) ] = {
194 [ C(RESULT_ACCESS) ] = -1,
195 [ C(RESULT_MISS) ] = -1,
196 },
197 },
183}; 198};
184 199
185static int sh7750_event_map(int event) 200static int sh7750_event_map(int event)
diff --git a/arch/sh/kernel/cpu/sh4a/perf_event.c b/arch/sh/kernel/cpu/sh4a/perf_event.c
index 17e6bebfede0..84a2c396ceee 100644
--- a/arch/sh/kernel/cpu/sh4a/perf_event.c
+++ b/arch/sh/kernel/cpu/sh4a/perf_event.c
@@ -205,6 +205,21 @@ static const int sh4a_cache_events
205 [ C(RESULT_MISS) ] = -1, 205 [ C(RESULT_MISS) ] = -1,
206 }, 206 },
207 }, 207 },
208
209 [ C(NODE) ] = {
210 [ C(OP_READ) ] = {
211 [ C(RESULT_ACCESS) ] = -1,
212 [ C(RESULT_MISS) ] = -1,
213 },
214 [ C(OP_WRITE) ] = {
215 [ C(RESULT_ACCESS) ] = -1,
216 [ C(RESULT_MISS) ] = -1,
217 },
218 [ C(OP_PREFETCH) ] = {
219 [ C(RESULT_ACCESS) ] = -1,
220 [ C(RESULT_MISS) ] = -1,
221 },
222 },
208}; 223};
209 224
210static int sh4a_event_map(int event) 225static int sh4a_event_map(int event)
diff --git a/arch/sh/kernel/cpu/shmobile/pm_runtime.c b/arch/sh/kernel/cpu/shmobile/pm_runtime.c
index 64c807c39208..bf280c812d2f 100644
--- a/arch/sh/kernel/cpu/shmobile/pm_runtime.c
+++ b/arch/sh/kernel/cpu/shmobile/pm_runtime.c
@@ -256,7 +256,7 @@ out:
256 return ret; 256 return ret;
257} 257}
258 258
259static struct dev_power_domain default_power_domain = { 259static struct dev_pm_domain default_pm_domain = {
260 .ops = { 260 .ops = {
261 .runtime_suspend = default_platform_runtime_suspend, 261 .runtime_suspend = default_platform_runtime_suspend,
262 .runtime_resume = default_platform_runtime_resume, 262 .runtime_resume = default_platform_runtime_resume,
@@ -285,7 +285,7 @@ static int platform_bus_notify(struct notifier_block *nb,
285 hwblk_disable(hwblk_info, hwblk); 285 hwblk_disable(hwblk_info, hwblk);
286 /* make sure driver re-inits itself once */ 286 /* make sure driver re-inits itself once */
287 __set_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags); 287 __set_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags);
288 dev->pwr_domain = &default_power_domain; 288 dev->pm_domain = &default_pm_domain;
289 break; 289 break;
290 /* TODO: add BUS_NOTIFY_BIND_DRIVER and increase idle count */ 290 /* TODO: add BUS_NOTIFY_BIND_DRIVER and increase idle count */
291 case BUS_NOTIFY_BOUND_DRIVER: 291 case BUS_NOTIFY_BOUND_DRIVER:
@@ -299,7 +299,7 @@ static int platform_bus_notify(struct notifier_block *nb,
299 __set_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags); 299 __set_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags);
300 break; 300 break;
301 case BUS_NOTIFY_DEL_DEVICE: 301 case BUS_NOTIFY_DEL_DEVICE:
302 dev->pwr_domain = NULL; 302 dev->pm_domain = NULL;
303 break; 303 break;
304 } 304 }
305 return 0; 305 return 0;
diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c
index 3d7b209b2178..92b3c276339a 100644
--- a/arch/sh/kernel/ptrace_32.c
+++ b/arch/sh/kernel/ptrace_32.c
@@ -63,7 +63,7 @@ static inline int put_stack_long(struct task_struct *task, int offset,
63 return 0; 63 return 0;
64} 64}
65 65
66void ptrace_triggered(struct perf_event *bp, int nmi, 66void ptrace_triggered(struct perf_event *bp,
67 struct perf_sample_data *data, struct pt_regs *regs) 67 struct perf_sample_data *data, struct pt_regs *regs)
68{ 68{
69 struct perf_event_attr attr; 69 struct perf_event_attr attr;
@@ -91,7 +91,8 @@ static int set_single_step(struct task_struct *tsk, unsigned long addr)
91 attr.bp_len = HW_BREAKPOINT_LEN_2; 91 attr.bp_len = HW_BREAKPOINT_LEN_2;
92 attr.bp_type = HW_BREAKPOINT_R; 92 attr.bp_type = HW_BREAKPOINT_R;
93 93
94 bp = register_user_hw_breakpoint(&attr, ptrace_triggered, tsk); 94 bp = register_user_hw_breakpoint(&attr, ptrace_triggered,
95 NULL, tsk);
95 if (IS_ERR(bp)) 96 if (IS_ERR(bp))
96 return PTR_ERR(bp); 97 return PTR_ERR(bp);
97 98
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c
index b51a17104b5f..d9006f8ffc14 100644
--- a/arch/sh/kernel/traps_32.c
+++ b/arch/sh/kernel/traps_32.c
@@ -393,7 +393,7 @@ int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
393 */ 393 */
394 if (!expected) { 394 if (!expected) {
395 unaligned_fixups_notify(current, instruction, regs); 395 unaligned_fixups_notify(current, instruction, regs);
396 perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, 396 perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1,
397 regs, address); 397 regs, address);
398 } 398 }
399 399
diff --git a/arch/sh/kernel/traps_64.c b/arch/sh/kernel/traps_64.c
index 6713ca97e553..67110be83fd7 100644
--- a/arch/sh/kernel/traps_64.c
+++ b/arch/sh/kernel/traps_64.c
@@ -434,7 +434,7 @@ static int misaligned_load(struct pt_regs *regs,
434 return error; 434 return error;
435 } 435 }
436 436
437 perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, address); 437 perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, address);
438 438
439 destreg = (opcode >> 4) & 0x3f; 439 destreg = (opcode >> 4) & 0x3f;
440 if (user_mode(regs)) { 440 if (user_mode(regs)) {
@@ -512,7 +512,7 @@ static int misaligned_store(struct pt_regs *regs,
512 return error; 512 return error;
513 } 513 }
514 514
515 perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, address); 515 perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, address);
516 516
517 srcreg = (opcode >> 4) & 0x3f; 517 srcreg = (opcode >> 4) & 0x3f;
518 if (user_mode(regs)) { 518 if (user_mode(regs)) {
@@ -588,7 +588,7 @@ static int misaligned_fpu_load(struct pt_regs *regs,
588 return error; 588 return error;
589 } 589 }
590 590
591 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, address); 591 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, address);
592 592
593 destreg = (opcode >> 4) & 0x3f; 593 destreg = (opcode >> 4) & 0x3f;
594 if (user_mode(regs)) { 594 if (user_mode(regs)) {
@@ -665,7 +665,7 @@ static int misaligned_fpu_store(struct pt_regs *regs,
665 return error; 665 return error;
666 } 666 }
667 667
668 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, address); 668 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, address);
669 669
670 srcreg = (opcode >> 4) & 0x3f; 670 srcreg = (opcode >> 4) & 0x3f;
671 if (user_mode(regs)) { 671 if (user_mode(regs)) {
diff --git a/arch/sh/math-emu/math.c b/arch/sh/math-emu/math.c
index f76a5090d5d1..977195210653 100644
--- a/arch/sh/math-emu/math.c
+++ b/arch/sh/math-emu/math.c
@@ -620,7 +620,7 @@ int do_fpu_inst(unsigned short inst, struct pt_regs *regs)
620 struct task_struct *tsk = current; 620 struct task_struct *tsk = current;
621 struct sh_fpu_soft_struct *fpu = &(tsk->thread.xstate->softfpu); 621 struct sh_fpu_soft_struct *fpu = &(tsk->thread.xstate->softfpu);
622 622
623 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); 623 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
624 624
625 if (!(task_thread_info(tsk)->status & TS_USEDFPU)) { 625 if (!(task_thread_info(tsk)->status & TS_USEDFPU)) {
626 /* initialize once. */ 626 /* initialize once. */
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c
index d4c34d757f0d..7bebd044f2a1 100644
--- a/arch/sh/mm/fault_32.c
+++ b/arch/sh/mm/fault_32.c
@@ -160,7 +160,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
160 if ((regs->sr & SR_IMASK) != SR_IMASK) 160 if ((regs->sr & SR_IMASK) != SR_IMASK)
161 local_irq_enable(); 161 local_irq_enable();
162 162
163 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); 163 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
164 164
165 /* 165 /*
166 * If we're in an interrupt, have no user context or are running 166 * If we're in an interrupt, have no user context or are running
@@ -210,11 +210,11 @@ good_area:
210 } 210 }
211 if (fault & VM_FAULT_MAJOR) { 211 if (fault & VM_FAULT_MAJOR) {
212 tsk->maj_flt++; 212 tsk->maj_flt++;
213 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, 213 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
214 regs, address); 214 regs, address);
215 } else { 215 } else {
216 tsk->min_flt++; 216 tsk->min_flt++;
217 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, 217 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
218 regs, address); 218 regs, address);
219 } 219 }
220 220
diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c
index 7f5810f5dfdc..e3430e093d43 100644
--- a/arch/sh/mm/tlbflush_64.c
+++ b/arch/sh/mm/tlbflush_64.c
@@ -116,7 +116,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
116 /* Not an IO address, so reenable interrupts */ 116 /* Not an IO address, so reenable interrupts */
117 local_irq_enable(); 117 local_irq_enable();
118 118
119 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); 119 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
120 120
121 /* 121 /*
122 * If we're in an interrupt or have no user 122 * If we're in an interrupt or have no user
@@ -200,11 +200,11 @@ good_area:
200 200
201 if (fault & VM_FAULT_MAJOR) { 201 if (fault & VM_FAULT_MAJOR) {
202 tsk->maj_flt++; 202 tsk->maj_flt++;
203 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, 203 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
204 regs, address); 204 regs, address);
205 } else { 205 } else {
206 tsk->min_flt++; 206 tsk->min_flt++;
207 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, 207 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
208 regs, address); 208 regs, address);
209 } 209 }
210 210
diff --git a/arch/sparc/include/asm/irqflags_32.h b/arch/sparc/include/asm/irqflags_32.h
index d4d0711de0f9..14848909e0de 100644
--- a/arch/sparc/include/asm/irqflags_32.h
+++ b/arch/sparc/include/asm/irqflags_32.h
@@ -18,7 +18,7 @@ extern void arch_local_irq_restore(unsigned long);
18extern unsigned long arch_local_irq_save(void); 18extern unsigned long arch_local_irq_save(void);
19extern void arch_local_irq_enable(void); 19extern void arch_local_irq_enable(void);
20 20
21static inline unsigned long arch_local_save_flags(void) 21static inline notrace unsigned long arch_local_save_flags(void)
22{ 22{
23 unsigned long flags; 23 unsigned long flags;
24 24
@@ -26,17 +26,17 @@ static inline unsigned long arch_local_save_flags(void)
26 return flags; 26 return flags;
27} 27}
28 28
29static inline void arch_local_irq_disable(void) 29static inline notrace void arch_local_irq_disable(void)
30{ 30{
31 arch_local_irq_save(); 31 arch_local_irq_save();
32} 32}
33 33
34static inline bool arch_irqs_disabled_flags(unsigned long flags) 34static inline notrace bool arch_irqs_disabled_flags(unsigned long flags)
35{ 35{
36 return (flags & PSR_PIL) != 0; 36 return (flags & PSR_PIL) != 0;
37} 37}
38 38
39static inline bool arch_irqs_disabled(void) 39static inline notrace bool arch_irqs_disabled(void)
40{ 40{
41 return arch_irqs_disabled_flags(arch_local_save_flags()); 41 return arch_irqs_disabled_flags(arch_local_save_flags());
42} 42}
diff --git a/arch/sparc/include/asm/irqflags_64.h b/arch/sparc/include/asm/irqflags_64.h
index aab969c82c2b..23cd27f6beb4 100644
--- a/arch/sparc/include/asm/irqflags_64.h
+++ b/arch/sparc/include/asm/irqflags_64.h
@@ -14,7 +14,7 @@
14 14
15#ifndef __ASSEMBLY__ 15#ifndef __ASSEMBLY__
16 16
17static inline unsigned long arch_local_save_flags(void) 17static inline notrace unsigned long arch_local_save_flags(void)
18{ 18{
19 unsigned long flags; 19 unsigned long flags;
20 20
@@ -26,7 +26,7 @@ static inline unsigned long arch_local_save_flags(void)
26 return flags; 26 return flags;
27} 27}
28 28
29static inline void arch_local_irq_restore(unsigned long flags) 29static inline notrace void arch_local_irq_restore(unsigned long flags)
30{ 30{
31 __asm__ __volatile__( 31 __asm__ __volatile__(
32 "wrpr %0, %%pil" 32 "wrpr %0, %%pil"
@@ -36,7 +36,7 @@ static inline void arch_local_irq_restore(unsigned long flags)
36 ); 36 );
37} 37}
38 38
39static inline void arch_local_irq_disable(void) 39static inline notrace void arch_local_irq_disable(void)
40{ 40{
41 __asm__ __volatile__( 41 __asm__ __volatile__(
42 "wrpr %0, %%pil" 42 "wrpr %0, %%pil"
@@ -46,7 +46,7 @@ static inline void arch_local_irq_disable(void)
46 ); 46 );
47} 47}
48 48
49static inline void arch_local_irq_enable(void) 49static inline notrace void arch_local_irq_enable(void)
50{ 50{
51 __asm__ __volatile__( 51 __asm__ __volatile__(
52 "wrpr 0, %%pil" 52 "wrpr 0, %%pil"
@@ -56,17 +56,17 @@ static inline void arch_local_irq_enable(void)
56 ); 56 );
57} 57}
58 58
59static inline int arch_irqs_disabled_flags(unsigned long flags) 59static inline notrace int arch_irqs_disabled_flags(unsigned long flags)
60{ 60{
61 return (flags > 0); 61 return (flags > 0);
62} 62}
63 63
64static inline int arch_irqs_disabled(void) 64static inline notrace int arch_irqs_disabled(void)
65{ 65{
66 return arch_irqs_disabled_flags(arch_local_save_flags()); 66 return arch_irqs_disabled_flags(arch_local_save_flags());
67} 67}
68 68
69static inline unsigned long arch_local_irq_save(void) 69static inline notrace unsigned long arch_local_irq_save(void)
70{ 70{
71 unsigned long flags, tmp; 71 unsigned long flags, tmp;
72 72
diff --git a/arch/sparc/include/asm/pci_32.h b/arch/sparc/include/asm/pci_32.h
index 862e3ce92b15..02939abd356c 100644
--- a/arch/sparc/include/asm/pci_32.h
+++ b/arch/sparc/include/asm/pci_32.h
@@ -42,9 +42,6 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev,
42} 42}
43#endif 43#endif
44 44
45struct device_node;
46extern struct device_node *pci_device_to_OF_node(struct pci_dev *pdev);
47
48#endif /* __KERNEL__ */ 45#endif /* __KERNEL__ */
49 46
50#ifndef CONFIG_LEON_PCI 47#ifndef CONFIG_LEON_PCI
diff --git a/arch/sparc/include/asm/pci_64.h b/arch/sparc/include/asm/pci_64.h
index 948b686ec089..2614d96141c9 100644
--- a/arch/sparc/include/asm/pci_64.h
+++ b/arch/sparc/include/asm/pci_64.h
@@ -91,9 +91,6 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
91 return PCI_IRQ_NONE; 91 return PCI_IRQ_NONE;
92} 92}
93 93
94struct device_node;
95extern struct device_node *pci_device_to_OF_node(struct pci_dev *pdev);
96
97#define HAVE_ARCH_PCI_RESOURCE_TO_USER 94#define HAVE_ARCH_PCI_RESOURCE_TO_USER
98extern void pci_resource_to_user(const struct pci_dev *dev, int bar, 95extern void pci_resource_to_user(const struct pci_dev *dev, int bar,
99 const struct resource *rsrc, 96 const struct resource *rsrc,
diff --git a/arch/sparc/include/asm/ptrace.h b/arch/sparc/include/asm/ptrace.h
index c7ad3fe2b252..b928b31424b1 100644
--- a/arch/sparc/include/asm/ptrace.h
+++ b/arch/sparc/include/asm/ptrace.h
@@ -205,6 +205,7 @@ do { current_thread_info()->syscall_noerror = 1; \
205} while (0) 205} while (0)
206#define user_mode(regs) (!((regs)->tstate & TSTATE_PRIV)) 206#define user_mode(regs) (!((regs)->tstate & TSTATE_PRIV))
207#define instruction_pointer(regs) ((regs)->tpc) 207#define instruction_pointer(regs) ((regs)->tpc)
208#define instruction_pointer_set(regs, val) ((regs)->tpc = (val))
208#define user_stack_pointer(regs) ((regs)->u_regs[UREG_FP]) 209#define user_stack_pointer(regs) ((regs)->u_regs[UREG_FP])
209#define regs_return_value(regs) ((regs)->u_regs[UREG_I0]) 210#define regs_return_value(regs) ((regs)->u_regs[UREG_I0])
210#ifdef CONFIG_SMP 211#ifdef CONFIG_SMP
diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
index 9fe08a1ea6c6..f445e98463e6 100644
--- a/arch/sparc/kernel/entry.S
+++ b/arch/sparc/kernel/entry.S
@@ -293,7 +293,7 @@ maybe_smp4m_msg:
293 WRITE_PAUSE 293 WRITE_PAUSE
294 wr %l4, PSR_ET, %psr 294 wr %l4, PSR_ET, %psr
295 WRITE_PAUSE 295 WRITE_PAUSE
296 sll %o3, 28, %o2 ! shift for simpler checks below 296 srl %o3, 28, %o2 ! shift for simpler checks below
297maybe_smp4m_msg_check_single: 297maybe_smp4m_msg_check_single:
298 andcc %o2, 0x1, %g0 298 andcc %o2, 0x1, %g0
299 beq,a maybe_smp4m_msg_check_mask 299 beq,a maybe_smp4m_msg_check_mask
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index 713dc91020a6..80a87e2a3e7c 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -284,7 +284,7 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
284 dev->sysdata = node; 284 dev->sysdata = node;
285 dev->dev.parent = bus->bridge; 285 dev->dev.parent = bus->bridge;
286 dev->dev.bus = &pci_bus_type; 286 dev->dev.bus = &pci_bus_type;
287 dev->dev.of_node = node; 287 dev->dev.of_node = of_node_get(node);
288 dev->devfn = devfn; 288 dev->devfn = devfn;
289 dev->multifunction = 0; /* maybe a lie? */ 289 dev->multifunction = 0; /* maybe a lie? */
290 set_pcie_port_type(dev); 290 set_pcie_port_type(dev);
@@ -1021,12 +1021,6 @@ void arch_teardown_msi_irq(unsigned int irq)
1021} 1021}
1022#endif /* !(CONFIG_PCI_MSI) */ 1022#endif /* !(CONFIG_PCI_MSI) */
1023 1023
1024struct device_node *pci_device_to_OF_node(struct pci_dev *pdev)
1025{
1026 return pdev->dev.of_node;
1027}
1028EXPORT_SYMBOL(pci_device_to_OF_node);
1029
1030static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit) 1024static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit)
1031{ 1025{
1032 struct pci_dev *ali_isa_bridge; 1026 struct pci_dev *ali_isa_bridge;
diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c
index 948601a066ff..a19f04195478 100644
--- a/arch/sparc/kernel/pcic.c
+++ b/arch/sparc/kernel/pcic.c
@@ -885,14 +885,6 @@ int pcibios_assign_resource(struct pci_dev *pdev, int resource)
885 return -ENXIO; 885 return -ENXIO;
886} 886}
887 887
888struct device_node *pci_device_to_OF_node(struct pci_dev *pdev)
889{
890 struct pcidev_cookie *pc = pdev->sysdata;
891
892 return pc->prom_node;
893}
894EXPORT_SYMBOL(pci_device_to_OF_node);
895
896/* 888/*
897 * This probably belongs here rather than ioport.c because 889 * This probably belongs here rather than ioport.c because
898 * we do not want this crud linked into SBus kernels. 890 * we do not want this crud linked into SBus kernels.
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 2cb0e1c001e2..62a034318b18 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -246,6 +246,20 @@ static const cache_map_t ultra3_cache_map = {
246 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, 246 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
247 }, 247 },
248}, 248},
249[C(NODE)] = {
250 [C(OP_READ)] = {
251 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
252 [C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
253 },
254 [ C(OP_WRITE) ] = {
255 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
256 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
257 },
258 [ C(OP_PREFETCH) ] = {
259 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
260 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
261 },
262},
249}; 263};
250 264
251static const struct sparc_pmu ultra3_pmu = { 265static const struct sparc_pmu ultra3_pmu = {
@@ -361,6 +375,20 @@ static const cache_map_t niagara1_cache_map = {
361 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, 375 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
362 }, 376 },
363}, 377},
378[C(NODE)] = {
379 [C(OP_READ)] = {
380 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
381 [C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
382 },
383 [ C(OP_WRITE) ] = {
384 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
385 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
386 },
387 [ C(OP_PREFETCH) ] = {
388 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
389 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
390 },
391},
364}; 392};
365 393
366static const struct sparc_pmu niagara1_pmu = { 394static const struct sparc_pmu niagara1_pmu = {
@@ -473,6 +501,20 @@ static const cache_map_t niagara2_cache_map = {
473 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, 501 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
474 }, 502 },
475}, 503},
504[C(NODE)] = {
505 [C(OP_READ)] = {
506 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
507 [C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
508 },
509 [ C(OP_WRITE) ] = {
510 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
511 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
512 },
513 [ C(OP_PREFETCH) ] = {
514 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
515 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
516 },
517},
476}; 518};
477 519
478static const struct sparc_pmu niagara2_pmu = { 520static const struct sparc_pmu niagara2_pmu = {
@@ -1277,7 +1319,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
1277 if (!sparc_perf_event_set_period(event, hwc, idx)) 1319 if (!sparc_perf_event_set_period(event, hwc, idx))
1278 continue; 1320 continue;
1279 1321
1280 if (perf_event_overflow(event, 1, &data, regs)) 1322 if (perf_event_overflow(event, &data, regs))
1281 sparc_pmu_stop(event, 0); 1323 sparc_pmu_stop(event, 0);
1282 } 1324 }
1283 1325
diff --git a/arch/sparc/kernel/unaligned_32.c b/arch/sparc/kernel/unaligned_32.c
index 4491f4cb2695..7efbb2f9e77f 100644
--- a/arch/sparc/kernel/unaligned_32.c
+++ b/arch/sparc/kernel/unaligned_32.c
@@ -247,7 +247,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
247 unsigned long addr = compute_effective_address(regs, insn); 247 unsigned long addr = compute_effective_address(regs, insn);
248 int err; 248 int err;
249 249
250 perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr); 250 perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
251 switch (dir) { 251 switch (dir) {
252 case load: 252 case load:
253 err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f), 253 err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f),
@@ -338,7 +338,7 @@ asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn)
338 } 338 }
339 339
340 addr = compute_effective_address(regs, insn); 340 addr = compute_effective_address(regs, insn);
341 perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr); 341 perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
342 switch(dir) { 342 switch(dir) {
343 case load: 343 case load:
344 err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f), 344 err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f),
diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
index b2b019ea8caa..35cff1673aa4 100644
--- a/arch/sparc/kernel/unaligned_64.c
+++ b/arch/sparc/kernel/unaligned_64.c
@@ -317,7 +317,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
317 317
318 addr = compute_effective_address(regs, insn, 318 addr = compute_effective_address(regs, insn,
319 ((insn >> 25) & 0x1f)); 319 ((insn >> 25) & 0x1f));
320 perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr); 320 perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
321 switch (asi) { 321 switch (asi) {
322 case ASI_NL: 322 case ASI_NL:
323 case ASI_AIUPL: 323 case ASI_AIUPL:
@@ -384,7 +384,7 @@ int handle_popc(u32 insn, struct pt_regs *regs)
384 int ret, i, rd = ((insn >> 25) & 0x1f); 384 int ret, i, rd = ((insn >> 25) & 0x1f);
385 int from_kernel = (regs->tstate & TSTATE_PRIV) != 0; 385 int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
386 386
387 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); 387 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
388 if (insn & 0x2000) { 388 if (insn & 0x2000) {
389 maybe_flush_windows(0, 0, rd, from_kernel); 389 maybe_flush_windows(0, 0, rd, from_kernel);
390 value = sign_extend_imm13(insn); 390 value = sign_extend_imm13(insn);
@@ -431,7 +431,7 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs)
431 int asi = decode_asi(insn, regs); 431 int asi = decode_asi(insn, regs);
432 int flag = (freg < 32) ? FPRS_DL : FPRS_DU; 432 int flag = (freg < 32) ? FPRS_DL : FPRS_DU;
433 433
434 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); 434 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
435 435
436 save_and_clear_fpu(); 436 save_and_clear_fpu();
437 current_thread_info()->xfsr[0] &= ~0x1c000; 437 current_thread_info()->xfsr[0] &= ~0x1c000;
@@ -554,7 +554,7 @@ void handle_ld_nf(u32 insn, struct pt_regs *regs)
554 int from_kernel = (regs->tstate & TSTATE_PRIV) != 0; 554 int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
555 unsigned long *reg; 555 unsigned long *reg;
556 556
557 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); 557 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
558 558
559 maybe_flush_windows(0, 0, rd, from_kernel); 559 maybe_flush_windows(0, 0, rd, from_kernel);
560 reg = fetch_reg_addr(rd, regs); 560 reg = fetch_reg_addr(rd, regs);
@@ -586,7 +586,7 @@ void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
586 586
587 if (tstate & TSTATE_PRIV) 587 if (tstate & TSTATE_PRIV)
588 die_if_kernel("lddfmna from kernel", regs); 588 die_if_kernel("lddfmna from kernel", regs);
589 perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, sfar); 589 perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, sfar);
590 if (test_thread_flag(TIF_32BIT)) 590 if (test_thread_flag(TIF_32BIT))
591 pc = (u32)pc; 591 pc = (u32)pc;
592 if (get_user(insn, (u32 __user *) pc) != -EFAULT) { 592 if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
@@ -647,7 +647,7 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
647 647
648 if (tstate & TSTATE_PRIV) 648 if (tstate & TSTATE_PRIV)
649 die_if_kernel("stdfmna from kernel", regs); 649 die_if_kernel("stdfmna from kernel", regs);
650 perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, sfar); 650 perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, sfar);
651 if (test_thread_flag(TIF_32BIT)) 651 if (test_thread_flag(TIF_32BIT))
652 pc = (u32)pc; 652 pc = (u32)pc;
653 if (get_user(insn, (u32 __user *) pc) != -EFAULT) { 653 if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
diff --git a/arch/sparc/kernel/visemul.c b/arch/sparc/kernel/visemul.c
index 36357717d691..32b626c9d815 100644
--- a/arch/sparc/kernel/visemul.c
+++ b/arch/sparc/kernel/visemul.c
@@ -802,7 +802,7 @@ int vis_emul(struct pt_regs *regs, unsigned int insn)
802 802
803 BUG_ON(regs->tstate & TSTATE_PRIV); 803 BUG_ON(regs->tstate & TSTATE_PRIV);
804 804
805 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); 805 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
806 806
807 if (test_thread_flag(TIF_32BIT)) 807 if (test_thread_flag(TIF_32BIT))
808 pc = (u32)pc; 808 pc = (u32)pc;
diff --git a/arch/sparc/math-emu/math_32.c b/arch/sparc/math-emu/math_32.c
index a3fccde894ec..aa4d55b0bdf0 100644
--- a/arch/sparc/math-emu/math_32.c
+++ b/arch/sparc/math-emu/math_32.c
@@ -164,7 +164,7 @@ int do_mathemu(struct pt_regs *regs, struct task_struct *fpt)
164 int retcode = 0; /* assume all succeed */ 164 int retcode = 0; /* assume all succeed */
165 unsigned long insn; 165 unsigned long insn;
166 166
167 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); 167 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
168 168
169#ifdef DEBUG_MATHEMU 169#ifdef DEBUG_MATHEMU
170 printk("In do_mathemu()... pc is %08lx\n", regs->pc); 170 printk("In do_mathemu()... pc is %08lx\n", regs->pc);
diff --git a/arch/sparc/math-emu/math_64.c b/arch/sparc/math-emu/math_64.c
index 56d2c44747b8..e575bd2fe381 100644
--- a/arch/sparc/math-emu/math_64.c
+++ b/arch/sparc/math-emu/math_64.c
@@ -184,7 +184,7 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f)
184 184
185 if (tstate & TSTATE_PRIV) 185 if (tstate & TSTATE_PRIV)
186 die_if_kernel("unfinished/unimplemented FPop from kernel", regs); 186 die_if_kernel("unfinished/unimplemented FPop from kernel", regs);
187 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); 187 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
188 if (test_thread_flag(TIF_32BIT)) 188 if (test_thread_flag(TIF_32BIT))
189 pc = (u32)pc; 189 pc = (u32)pc;
190 if (get_user(insn, (u32 __user *) pc) != -EFAULT) { 190 if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index 7543ddbdadb2..aa1c1b1ce5cc 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -251,7 +251,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
251 if (in_atomic() || !mm) 251 if (in_atomic() || !mm)
252 goto no_context; 252 goto no_context;
253 253
254 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); 254 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
255 255
256 down_read(&mm->mmap_sem); 256 down_read(&mm->mmap_sem);
257 257
@@ -301,12 +301,10 @@ good_area:
301 } 301 }
302 if (fault & VM_FAULT_MAJOR) { 302 if (fault & VM_FAULT_MAJOR) {
303 current->maj_flt++; 303 current->maj_flt++;
304 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, 304 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
305 regs, address);
306 } else { 305 } else {
307 current->min_flt++; 306 current->min_flt++;
308 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, 307 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
309 regs, address);
310 } 308 }
311 up_read(&mm->mmap_sem); 309 up_read(&mm->mmap_sem);
312 return; 310 return;
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index f92ce56a8b22..504c0622f729 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -325,7 +325,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
325 if (in_atomic() || !mm) 325 if (in_atomic() || !mm)
326 goto intr_or_no_mm; 326 goto intr_or_no_mm;
327 327
328 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); 328 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
329 329
330 if (!down_read_trylock(&mm->mmap_sem)) { 330 if (!down_read_trylock(&mm->mmap_sem)) {
331 if ((regs->tstate & TSTATE_PRIV) && 331 if ((regs->tstate & TSTATE_PRIV) &&
@@ -433,12 +433,10 @@ good_area:
433 } 433 }
434 if (fault & VM_FAULT_MAJOR) { 434 if (fault & VM_FAULT_MAJOR) {
435 current->maj_flt++; 435 current->maj_flt++;
436 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, 436 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
437 regs, address);
438 } else { 437 } else {
439 current->min_flt++; 438 current->min_flt++;
440 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, 439 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
441 regs, address);
442 } 440 }
443 up_read(&mm->mmap_sem); 441 up_read(&mm->mmap_sem);
444 442
diff --git a/arch/sparc/mm/leon_mm.c b/arch/sparc/mm/leon_mm.c
index c0e01297e64e..e485a6804998 100644
--- a/arch/sparc/mm/leon_mm.c
+++ b/arch/sparc/mm/leon_mm.c
@@ -226,7 +226,7 @@ void leon3_getCacheRegs(struct leon3_cacheregs *regs)
226 * Leon2 and Leon3 differ in their way of telling cache information 226 * Leon2 and Leon3 differ in their way of telling cache information
227 * 227 *
228 */ 228 */
229int leon_flush_needed(void) 229int __init leon_flush_needed(void)
230{ 230{
231 int flush_needed = -1; 231 int flush_needed = -1;
232 unsigned int ssize, sets; 232 unsigned int ssize, sets;
diff --git a/arch/tile/kvm/Kconfig b/arch/tile/kvm/Kconfig
index b88f9c047781..669fcdba31ea 100644
--- a/arch/tile/kvm/Kconfig
+++ b/arch/tile/kvm/Kconfig
@@ -33,6 +33,5 @@ config KVM
33 If unsure, say N. 33 If unsure, say N.
34 34
35source drivers/vhost/Kconfig 35source drivers/vhost/Kconfig
36source drivers/virtio/Kconfig
37 36
38endif # VIRTUALIZATION 37endif # VIRTUALIZATION
diff --git a/arch/um/sys-i386/Makefile b/arch/um/sys-i386/Makefile
index b1da91c1b200..87b659dadf3f 100644
--- a/arch/um/sys-i386/Makefile
+++ b/arch/um/sys-i386/Makefile
@@ -8,7 +8,8 @@ obj-y = bug.o bugs.o checksum.o delay.o fault.o ksyms.o ldt.o ptrace.o \
8 8
9obj-$(CONFIG_BINFMT_ELF) += elfcore.o 9obj-$(CONFIG_BINFMT_ELF) += elfcore.o
10 10
11subarch-obj-y = lib/semaphore_32.o lib/string_32.o 11subarch-obj-y = lib/string_32.o
12subarch-obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += lib/rwsem.o
12subarch-obj-$(CONFIG_HIGHMEM) += mm/highmem_32.o 13subarch-obj-$(CONFIG_HIGHMEM) += mm/highmem_32.o
13subarch-obj-$(CONFIG_MODULES) += kernel/module.o 14subarch-obj-$(CONFIG_MODULES) += kernel/module.o
14 15
diff --git a/arch/um/sys-x86_64/Makefile b/arch/um/sys-x86_64/Makefile
index c1ea9eb04466..61fc99a42e10 100644
--- a/arch/um/sys-x86_64/Makefile
+++ b/arch/um/sys-x86_64/Makefile
@@ -9,7 +9,7 @@ obj-y = bug.o bugs.o delay.o fault.o ldt.o mem.o ptrace.o ptrace_user.o \
9 sysrq.o ksyms.o tls.o 9 sysrq.o ksyms.o tls.o
10 10
11subarch-obj-y = lib/csum-partial_64.o lib/memcpy_64.o lib/thunk_64.o \ 11subarch-obj-y = lib/csum-partial_64.o lib/memcpy_64.o lib/thunk_64.o \
12 lib/rwsem_64.o 12 lib/rwsem.o
13subarch-obj-$(CONFIG_MODULES) += kernel/module.o 13subarch-obj-$(CONFIG_MODULES) += kernel/module.o
14 14
15ldt-y = ../sys-i386/ldt.o 15ldt-y = ../sys-i386/ldt.o
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 1f03e221a01e..a67e014e4e44 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -20,6 +20,7 @@ config X86
20 select HAVE_UNSTABLE_SCHED_CLOCK 20 select HAVE_UNSTABLE_SCHED_CLOCK
21 select HAVE_IDE 21 select HAVE_IDE
22 select HAVE_OPROFILE 22 select HAVE_OPROFILE
23 select HAVE_PCSPKR_PLATFORM
23 select HAVE_PERF_EVENTS 24 select HAVE_PERF_EVENTS
24 select HAVE_IRQ_WORK 25 select HAVE_IRQ_WORK
25 select HAVE_IOREMAP_PROT 26 select HAVE_IOREMAP_PROT
@@ -70,6 +71,7 @@ config X86
70 select IRQ_FORCED_THREADING 71 select IRQ_FORCED_THREADING
71 select USE_GENERIC_SMP_HELPERS if SMP 72 select USE_GENERIC_SMP_HELPERS if SMP
72 select HAVE_BPF_JIT if (X86_64 && NET) 73 select HAVE_BPF_JIT if (X86_64 && NET)
74 select CLKEVT_I8253
73 75
74config INSTRUCTION_DECODER 76config INSTRUCTION_DECODER
75 def_bool (KPROBES || PERF_EVENTS) 77 def_bool (KPROBES || PERF_EVENTS)
@@ -93,6 +95,10 @@ config CLOCKSOURCE_WATCHDOG
93config GENERIC_CLOCKEVENTS 95config GENERIC_CLOCKEVENTS
94 def_bool y 96 def_bool y
95 97
98config ARCH_CLOCKSOURCE_DATA
99 def_bool y
100 depends on X86_64
101
96config GENERIC_CLOCKEVENTS_BROADCAST 102config GENERIC_CLOCKEVENTS_BROADCAST
97 def_bool y 103 def_bool y
98 depends on X86_64 || (X86_32 && X86_LOCAL_APIC) 104 depends on X86_64 || (X86_32 && X86_LOCAL_APIC)
@@ -384,12 +390,21 @@ config X86_INTEL_CE
384 This option compiles in support for the CE4100 SOC for settop 390 This option compiles in support for the CE4100 SOC for settop
385 boxes and media devices. 391 boxes and media devices.
386 392
393config X86_INTEL_MID
394 bool "Intel MID platform support"
395 depends on X86_32
396 depends on X86_EXTENDED_PLATFORM
397 ---help---
398 Select to build a kernel capable of supporting Intel MID platform
399 systems which do not have the PCI legacy interfaces (Moorestown,
400 Medfield). If you are building for a PC class system say N here.
401
402if X86_INTEL_MID
403
387config X86_MRST 404config X86_MRST
388 bool "Moorestown MID platform" 405 bool "Moorestown MID platform"
389 depends on PCI 406 depends on PCI
390 depends on PCI_GOANY 407 depends on PCI_GOANY
391 depends on X86_32
392 depends on X86_EXTENDED_PLATFORM
393 depends on X86_IO_APIC 408 depends on X86_IO_APIC
394 select APB_TIMER 409 select APB_TIMER
395 select I2C 410 select I2C
@@ -404,6 +419,8 @@ config X86_MRST
404 nor standard legacy replacement devices/features. e.g. Moorestown does 419 nor standard legacy replacement devices/features. e.g. Moorestown does
405 not contain i8259, i8254, HPET, legacy BIOS, most of the io ports. 420 not contain i8259, i8254, HPET, legacy BIOS, most of the io ports.
406 421
422endif
423
407config X86_RDC321X 424config X86_RDC321X
408 bool "RDC R-321x SoC" 425 bool "RDC R-321x SoC"
409 depends on X86_32 426 depends on X86_32
@@ -629,6 +646,7 @@ config HPET_EMULATE_RTC
629config APB_TIMER 646config APB_TIMER
630 def_bool y if MRST 647 def_bool y if MRST
631 prompt "Langwell APB Timer Support" if X86_MRST 648 prompt "Langwell APB Timer Support" if X86_MRST
649 select DW_APB_TIMER
632 help 650 help
633 APB timer is the replacement for 8254, HPET on X86 MID platforms. 651 APB timer is the replacement for 8254, HPET on X86 MID platforms.
634 The APBT provides a stable time base on SMP 652 The APBT provides a stable time base on SMP
@@ -692,33 +710,6 @@ config CALGARY_IOMMU_ENABLED_BY_DEFAULT
692 Calgary anyway, pass 'iommu=calgary' on the kernel command line. 710 Calgary anyway, pass 'iommu=calgary' on the kernel command line.
693 If unsure, say Y. 711 If unsure, say Y.
694 712
695config AMD_IOMMU
696 bool "AMD IOMMU support"
697 select SWIOTLB
698 select PCI_MSI
699 select PCI_IOV
700 depends on X86_64 && PCI && ACPI
701 ---help---
702 With this option you can enable support for AMD IOMMU hardware in
703 your system. An IOMMU is a hardware component which provides
704 remapping of DMA memory accesses from devices. With an AMD IOMMU you
705 can isolate the the DMA memory of different devices and protect the
706 system from misbehaving device drivers or hardware.
707
708 You can find out if your system has an AMD IOMMU if you look into
709 your BIOS for an option to enable it or if you have an IVRS ACPI
710 table.
711
712config AMD_IOMMU_STATS
713 bool "Export AMD IOMMU statistics to debugfs"
714 depends on AMD_IOMMU
715 select DEBUG_FS
716 ---help---
717 This option enables code in the AMD IOMMU driver to collect various
718 statistics about whats happening in the driver and exports that
719 information to userspace via debugfs.
720 If unsure, say N.
721
722# need this always selected by IOMMU for the VIA workaround 713# need this always selected by IOMMU for the VIA workaround
723config SWIOTLB 714config SWIOTLB
724 def_bool y if X86_64 715 def_bool y if X86_64
@@ -732,9 +723,6 @@ config SWIOTLB
732config IOMMU_HELPER 723config IOMMU_HELPER
733 def_bool (CALGARY_IOMMU || GART_IOMMU || SWIOTLB || AMD_IOMMU) 724 def_bool (CALGARY_IOMMU || GART_IOMMU || SWIOTLB || AMD_IOMMU)
734 725
735config IOMMU_API
736 def_bool (AMD_IOMMU || DMAR)
737
738config MAXSMP 726config MAXSMP
739 bool "Enable Maximum number of SMP Processors and NUMA Nodes" 727 bool "Enable Maximum number of SMP Processors and NUMA Nodes"
740 depends on X86_64 && SMP && DEBUG_KERNEL && EXPERIMENTAL 728 depends on X86_64 && SMP && DEBUG_KERNEL && EXPERIMENTAL
@@ -1182,7 +1170,7 @@ comment "NUMA (Summit) requires SMP, 64GB highmem support, ACPI"
1182config AMD_NUMA 1170config AMD_NUMA
1183 def_bool y 1171 def_bool y
1184 prompt "Old style AMD Opteron NUMA detection" 1172 prompt "Old style AMD Opteron NUMA detection"
1185 depends on NUMA && PCI 1173 depends on X86_64 && NUMA && PCI
1186 ---help--- 1174 ---help---
1187 Enable AMD NUMA node topology detection. You should say Y here if 1175 Enable AMD NUMA node topology detection. You should say Y here if
1188 you have a multi processor AMD system. This uses an old method to 1176 you have a multi processor AMD system. This uses an old method to
@@ -1954,55 +1942,6 @@ config PCI_CNB20LE_QUIRK
1954 1942
1955 You should say N unless you know you need this. 1943 You should say N unless you know you need this.
1956 1944
1957config DMAR
1958 bool "Support for DMA Remapping Devices (EXPERIMENTAL)"
1959 depends on PCI_MSI && ACPI && EXPERIMENTAL
1960 help
1961 DMA remapping (DMAR) devices support enables independent address
1962 translations for Direct Memory Access (DMA) from devices.
1963 These DMA remapping devices are reported via ACPI tables
1964 and include PCI device scope covered by these DMA
1965 remapping devices.
1966
1967config DMAR_DEFAULT_ON
1968 def_bool y
1969 prompt "Enable DMA Remapping Devices by default"
1970 depends on DMAR
1971 help
1972 Selecting this option will enable a DMAR device at boot time if
1973 one is found. If this option is not selected, DMAR support can
1974 be enabled by passing intel_iommu=on to the kernel. It is
1975 recommended you say N here while the DMAR code remains
1976 experimental.
1977
1978config DMAR_BROKEN_GFX_WA
1979 bool "Workaround broken graphics drivers (going away soon)"
1980 depends on DMAR && BROKEN
1981 ---help---
1982 Current Graphics drivers tend to use physical address
1983 for DMA and avoid using DMA APIs. Setting this config
1984 option permits the IOMMU driver to set a unity map for
1985 all the OS-visible memory. Hence the driver can continue
1986 to use physical addresses for DMA, at least until this
1987 option is removed in the 2.6.32 kernel.
1988
1989config DMAR_FLOPPY_WA
1990 def_bool y
1991 depends on DMAR
1992 ---help---
1993 Floppy disk drivers are known to bypass DMA API calls
1994 thereby failing to work when IOMMU is enabled. This
1995 workaround will setup a 1:1 mapping for the first
1996 16MiB to make floppy (an ISA device) work.
1997
1998config INTR_REMAP
1999 bool "Support for Interrupt Remapping (EXPERIMENTAL)"
2000 depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI && EXPERIMENTAL
2001 ---help---
2002 Supports Interrupt remapping for IO-APIC and MSI devices.
2003 To use x2apic mode in the CPU's which support x2APIC enhancements or
2004 to support platforms with CPU's having > 8 bit APIC ID, say Y.
2005
2006source "drivers/pci/pcie/Kconfig" 1945source "drivers/pci/pcie/Kconfig"
2007 1946
2008source "drivers/pci/Kconfig" 1947source "drivers/pci/Kconfig"
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 6a7cfdf8ff69..e3ca7e0d858c 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -312,6 +312,9 @@ config X86_CMPXCHG
312config CMPXCHG_LOCAL 312config CMPXCHG_LOCAL
313 def_bool X86_64 || (X86_32 && !M386) 313 def_bool X86_64 || (X86_32 && !M386)
314 314
315config CMPXCHG_DOUBLE
316 def_bool y
317
315config X86_L1_CACHE_SHIFT 318config X86_L1_CACHE_SHIFT
316 int 319 int
317 default "7" if MPENTIUM4 || MPSC 320 default "7" if MPENTIUM4 || MPSC
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index f7cb086b4add..95365a82b6a0 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -9,12 +9,6 @@
9# Changed by many, many contributors over the years. 9# Changed by many, many contributors over the years.
10# 10#
11 11
12# ROOT_DEV specifies the default root-device when making the image.
13# This can be either FLOPPY, CURRENT, /dev/xxxx or empty, in which case
14# the default of FLOPPY is used by 'build'.
15
16ROOT_DEV := CURRENT
17
18# If you want to preset the SVGA mode, uncomment the next line and 12# If you want to preset the SVGA mode, uncomment the next line and
19# set SVGA_MODE to whatever number you want. 13# set SVGA_MODE to whatever number you want.
20# Set it to -DSVGA_MODE=NORMAL_VGA if you just want the EGA/VGA mode. 14# Set it to -DSVGA_MODE=NORMAL_VGA if you just want the EGA/VGA mode.
@@ -75,8 +69,7 @@ GCOV_PROFILE := n
75$(obj)/bzImage: asflags-y := $(SVGA_MODE) 69$(obj)/bzImage: asflags-y := $(SVGA_MODE)
76 70
77quiet_cmd_image = BUILD $@ 71quiet_cmd_image = BUILD $@
78cmd_image = $(obj)/tools/build $(obj)/setup.bin $(obj)/vmlinux.bin \ 72cmd_image = $(obj)/tools/build $(obj)/setup.bin $(obj)/vmlinux.bin > $@
79 $(ROOT_DEV) > $@
80 73
81$(obj)/bzImage: $(obj)/setup.bin $(obj)/vmlinux.bin $(obj)/tools/build FORCE 74$(obj)/bzImage: $(obj)/setup.bin $(obj)/vmlinux.bin $(obj)/tools/build FORCE
82 $(call if_changed,image) 75 $(call if_changed,image)
diff --git a/arch/x86/boot/tools/build.c b/arch/x86/boot/tools/build.c
index ee3a4ea923ac..fdc60a0b3c20 100644
--- a/arch/x86/boot/tools/build.c
+++ b/arch/x86/boot/tools/build.c
@@ -130,7 +130,7 @@ static void die(const char * str, ...)
130 130
131static void usage(void) 131static void usage(void)
132{ 132{
133 die("Usage: build setup system [rootdev] [> image]"); 133 die("Usage: build setup system [> image]");
134} 134}
135 135
136int main(int argc, char ** argv) 136int main(int argc, char ** argv)
@@ -138,39 +138,14 @@ int main(int argc, char ** argv)
138 unsigned int i, sz, setup_sectors; 138 unsigned int i, sz, setup_sectors;
139 int c; 139 int c;
140 u32 sys_size; 140 u32 sys_size;
141 u8 major_root, minor_root;
142 struct stat sb; 141 struct stat sb;
143 FILE *file; 142 FILE *file;
144 int fd; 143 int fd;
145 void *kernel; 144 void *kernel;
146 u32 crc = 0xffffffffUL; 145 u32 crc = 0xffffffffUL;
147 146
148 if ((argc < 3) || (argc > 4)) 147 if (argc != 3)
149 usage(); 148 usage();
150 if (argc > 3) {
151 if (!strcmp(argv[3], "CURRENT")) {
152 if (stat("/", &sb)) {
153 perror("/");
154 die("Couldn't stat /");
155 }
156 major_root = major(sb.st_dev);
157 minor_root = minor(sb.st_dev);
158 } else if (strcmp(argv[3], "FLOPPY")) {
159 if (stat(argv[3], &sb)) {
160 perror(argv[3]);
161 die("Couldn't stat root device.");
162 }
163 major_root = major(sb.st_rdev);
164 minor_root = minor(sb.st_rdev);
165 } else {
166 major_root = 0;
167 minor_root = 0;
168 }
169 } else {
170 major_root = DEFAULT_MAJOR_ROOT;
171 minor_root = DEFAULT_MINOR_ROOT;
172 }
173 fprintf(stderr, "Root device is (%d, %d)\n", major_root, minor_root);
174 149
175 /* Copy the setup code */ 150 /* Copy the setup code */
176 file = fopen(argv[1], "r"); 151 file = fopen(argv[1], "r");
@@ -193,8 +168,8 @@ int main(int argc, char ** argv)
193 memset(buf+c, 0, i-c); 168 memset(buf+c, 0, i-c);
194 169
195 /* Set the default root device */ 170 /* Set the default root device */
196 buf[508] = minor_root; 171 buf[508] = DEFAULT_MINOR_ROOT;
197 buf[509] = major_root; 172 buf[509] = DEFAULT_MAJOR_ROOT;
198 173
199 fprintf(stderr, "Setup is %d bytes (padded to %d bytes).\n", c, i); 174 fprintf(stderr, "Setup is %d bytes (padded to %d bytes).\n", c, i);
200 175
diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c
index 7a6e68e4f748..976aa64d9a20 100644
--- a/arch/x86/crypto/ghash-clmulni-intel_glue.c
+++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c
@@ -245,7 +245,7 @@ static int ghash_async_setkey(struct crypto_ahash *tfm, const u8 *key,
245 crypto_ahash_set_flags(tfm, crypto_ahash_get_flags(child) 245 crypto_ahash_set_flags(tfm, crypto_ahash_get_flags(child)
246 & CRYPTO_TFM_RES_MASK); 246 & CRYPTO_TFM_RES_MASK);
247 247
248 return 0; 248 return err;
249} 249}
250 250
251static int ghash_async_init_tfm(struct crypto_tfm *tfm) 251static int ghash_async_init_tfm(struct crypto_tfm *tfm)
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index 588a7aa937e1..65577698cab2 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -127,15 +127,17 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
127 127
128asmlinkage long sys32_sigsuspend(int history0, int history1, old_sigset_t mask) 128asmlinkage long sys32_sigsuspend(int history0, int history1, old_sigset_t mask)
129{ 129{
130 mask &= _BLOCKABLE; 130 sigset_t blocked;
131 spin_lock_irq(&current->sighand->siglock); 131
132 current->saved_sigmask = current->blocked; 132 current->saved_sigmask = current->blocked;
133 siginitset(&current->blocked, mask); 133
134 recalc_sigpending(); 134 mask &= _BLOCKABLE;
135 spin_unlock_irq(&current->sighand->siglock); 135 siginitset(&blocked, mask);
136 set_current_blocked(&blocked);
136 137
137 current->state = TASK_INTERRUPTIBLE; 138 current->state = TASK_INTERRUPTIBLE;
138 schedule(); 139 schedule();
140
139 set_restore_sigmask(); 141 set_restore_sigmask();
140 return -ERESTARTNOHAND; 142 return -ERESTARTNOHAND;
141} 143}
@@ -279,10 +281,7 @@ asmlinkage long sys32_sigreturn(struct pt_regs *regs)
279 goto badframe; 281 goto badframe;
280 282
281 sigdelsetmask(&set, ~_BLOCKABLE); 283 sigdelsetmask(&set, ~_BLOCKABLE);
282 spin_lock_irq(&current->sighand->siglock); 284 set_current_blocked(&set);
283 current->blocked = set;
284 recalc_sigpending();
285 spin_unlock_irq(&current->sighand->siglock);
286 285
287 if (ia32_restore_sigcontext(regs, &frame->sc, &ax)) 286 if (ia32_restore_sigcontext(regs, &frame->sc, &ax))
288 goto badframe; 287 goto badframe;
@@ -308,10 +307,7 @@ asmlinkage long sys32_rt_sigreturn(struct pt_regs *regs)
308 goto badframe; 307 goto badframe;
309 308
310 sigdelsetmask(&set, ~_BLOCKABLE); 309 sigdelsetmask(&set, ~_BLOCKABLE);
311 spin_lock_irq(&current->sighand->siglock); 310 set_current_blocked(&set);
312 current->blocked = set;
313 recalc_sigpending();
314 spin_unlock_irq(&current->sighand->siglock);
315 311
316 if (ia32_restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax)) 312 if (ia32_restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax))
317 goto badframe; 313 goto badframe;
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index c1870dddd322..a0e866d233ee 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -143,7 +143,7 @@ ENTRY(ia32_sysenter_target)
143 CFI_REL_OFFSET rip,0 143 CFI_REL_OFFSET rip,0
144 pushq_cfi %rax 144 pushq_cfi %rax
145 cld 145 cld
146 SAVE_ARGS 0,0,1 146 SAVE_ARGS 0,1,0
147 /* no need to do an access_ok check here because rbp has been 147 /* no need to do an access_ok check here because rbp has been
148 32bit zero extended */ 148 32bit zero extended */
1491: movl (%rbp),%ebp 1491: movl (%rbp),%ebp
@@ -173,7 +173,7 @@ sysexit_from_sys_call:
173 andl $~0x200,EFLAGS-R11(%rsp) 173 andl $~0x200,EFLAGS-R11(%rsp)
174 movl RIP-R11(%rsp),%edx /* User %eip */ 174 movl RIP-R11(%rsp),%edx /* User %eip */
175 CFI_REGISTER rip,rdx 175 CFI_REGISTER rip,rdx
176 RESTORE_ARGS 1,24,1,1,1,1 176 RESTORE_ARGS 0,24,0,0,0,0
177 xorq %r8,%r8 177 xorq %r8,%r8
178 xorq %r9,%r9 178 xorq %r9,%r9
179 xorq %r10,%r10 179 xorq %r10,%r10
@@ -289,7 +289,7 @@ ENTRY(ia32_cstar_target)
289 * disabled irqs and here we enable it straight after entry: 289 * disabled irqs and here we enable it straight after entry:
290 */ 290 */
291 ENABLE_INTERRUPTS(CLBR_NONE) 291 ENABLE_INTERRUPTS(CLBR_NONE)
292 SAVE_ARGS 8,1,1 292 SAVE_ARGS 8,0,0
293 movl %eax,%eax /* zero extension */ 293 movl %eax,%eax /* zero extension */
294 movq %rax,ORIG_RAX-ARGOFFSET(%rsp) 294 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
295 movq %rcx,RIP-ARGOFFSET(%rsp) 295 movq %rcx,RIP-ARGOFFSET(%rsp)
@@ -328,7 +328,7 @@ cstar_dispatch:
328 jnz sysretl_audit 328 jnz sysretl_audit
329sysretl_from_sys_call: 329sysretl_from_sys_call:
330 andl $~TS_COMPAT,TI_status(%r10) 330 andl $~TS_COMPAT,TI_status(%r10)
331 RESTORE_ARGS 1,-ARG_SKIP,1,1,1 331 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
332 movl RIP-ARGOFFSET(%rsp),%ecx 332 movl RIP-ARGOFFSET(%rsp),%ecx
333 CFI_REGISTER rip,rcx 333 CFI_REGISTER rip,rcx
334 movl EFLAGS-ARGOFFSET(%rsp),%r11d 334 movl EFLAGS-ARGOFFSET(%rsp),%r11d
@@ -419,7 +419,7 @@ ENTRY(ia32_syscall)
419 cld 419 cld
420 /* note the registers are not zero extended to the sf. 420 /* note the registers are not zero extended to the sf.
421 this could be a problem. */ 421 this could be a problem. */
422 SAVE_ARGS 0,0,1 422 SAVE_ARGS 0,1,0
423 GET_THREAD_INFO(%r10) 423 GET_THREAD_INFO(%r10)
424 orl $TS_COMPAT,TI_status(%r10) 424 orl $TS_COMPAT,TI_status(%r10)
425 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10) 425 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
index 94d420b360d1..4554cc6fb96a 100644
--- a/arch/x86/include/asm/alternative-asm.h
+++ b/arch/x86/include/asm/alternative-asm.h
@@ -17,8 +17,8 @@
17 17
18.macro altinstruction_entry orig alt feature orig_len alt_len 18.macro altinstruction_entry orig alt feature orig_len alt_len
19 .align 8 19 .align 8
20 .quad \orig 20 .long \orig - .
21 .quad \alt 21 .long \alt - .
22 .word \feature 22 .word \feature
23 .byte \orig_len 23 .byte \orig_len
24 .byte \alt_len 24 .byte \alt_len
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index bf535f947e8c..23fb6d79f209 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -43,8 +43,8 @@
43#endif 43#endif
44 44
45struct alt_instr { 45struct alt_instr {
46 u8 *instr; /* original instruction */ 46 s32 instr_offset; /* original instruction */
47 u8 *replacement; 47 s32 repl_offset; /* offset to replacement instruction */
48 u16 cpuid; /* cpuid bit set for replacement */ 48 u16 cpuid; /* cpuid bit set for replacement */
49 u8 instrlen; /* length of original instruction */ 49 u8 instrlen; /* length of original instruction */
50 u8 replacementlen; /* length of new instruction, <= instrlen */ 50 u8 replacementlen; /* length of new instruction, <= instrlen */
@@ -84,8 +84,8 @@ static inline int alternatives_text_reserved(void *start, void *end)
84 "661:\n\t" oldinstr "\n662:\n" \ 84 "661:\n\t" oldinstr "\n662:\n" \
85 ".section .altinstructions,\"a\"\n" \ 85 ".section .altinstructions,\"a\"\n" \
86 _ASM_ALIGN "\n" \ 86 _ASM_ALIGN "\n" \
87 _ASM_PTR "661b\n" /* label */ \ 87 " .long 661b - .\n" /* label */ \
88 _ASM_PTR "663f\n" /* new instruction */ \ 88 " .long 663f - .\n" /* new instruction */ \
89 " .word " __stringify(feature) "\n" /* feature bit */ \ 89 " .word " __stringify(feature) "\n" /* feature bit */ \
90 " .byte 662b-661b\n" /* sourcelen */ \ 90 " .byte 662b-661b\n" /* sourcelen */ \
91 " .byte 664f-663f\n" /* replacementlen */ \ 91 " .byte 664f-663f\n" /* replacementlen */ \
diff --git a/arch/x86/include/asm/amd_iommu.h b/arch/x86/include/asm/amd_iommu.h
deleted file mode 100644
index a6863a2dec1f..000000000000
--- a/arch/x86/include/asm/amd_iommu.h
+++ /dev/null
@@ -1,35 +0,0 @@
1/*
2 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
3 * Author: Joerg Roedel <joerg.roedel@amd.com>
4 * Leo Duran <leo.duran@amd.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#ifndef _ASM_X86_AMD_IOMMU_H
21#define _ASM_X86_AMD_IOMMU_H
22
23#include <linux/irqreturn.h>
24
25#ifdef CONFIG_AMD_IOMMU
26
27extern int amd_iommu_detect(void);
28
29#else
30
31static inline int amd_iommu_detect(void) { return -ENODEV; }
32
33#endif
34
35#endif /* _ASM_X86_AMD_IOMMU_H */
diff --git a/arch/x86/include/asm/amd_iommu_proto.h b/arch/x86/include/asm/amd_iommu_proto.h
deleted file mode 100644
index 55d95eb789b3..000000000000
--- a/arch/x86/include/asm/amd_iommu_proto.h
+++ /dev/null
@@ -1,54 +0,0 @@
1/*
2 * Copyright (C) 2009-2010 Advanced Micro Devices, Inc.
3 * Author: Joerg Roedel <joerg.roedel@amd.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18
19#ifndef _ASM_X86_AMD_IOMMU_PROTO_H
20#define _ASM_X86_AMD_IOMMU_PROTO_H
21
22#include <asm/amd_iommu_types.h>
23
24extern int amd_iommu_init_dma_ops(void);
25extern int amd_iommu_init_passthrough(void);
26extern irqreturn_t amd_iommu_int_thread(int irq, void *data);
27extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
28extern void amd_iommu_apply_erratum_63(u16 devid);
29extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu);
30extern int amd_iommu_init_devices(void);
31extern void amd_iommu_uninit_devices(void);
32extern void amd_iommu_init_notifier(void);
33extern void amd_iommu_init_api(void);
34#ifndef CONFIG_AMD_IOMMU_STATS
35
36static inline void amd_iommu_stats_init(void) { }
37
38#endif /* !CONFIG_AMD_IOMMU_STATS */
39
40static inline bool is_rd890_iommu(struct pci_dev *pdev)
41{
42 return (pdev->vendor == PCI_VENDOR_ID_ATI) &&
43 (pdev->device == PCI_DEVICE_ID_RD890_IOMMU);
44}
45
46static inline bool iommu_feature(struct amd_iommu *iommu, u64 f)
47{
48 if (!(iommu->cap & (1 << IOMMU_CAP_EFR)))
49 return false;
50
51 return !!(iommu->features & f);
52}
53
54#endif /* _ASM_X86_AMD_IOMMU_PROTO_H */
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h
deleted file mode 100644
index 4c9982995414..000000000000
--- a/arch/x86/include/asm/amd_iommu_types.h
+++ /dev/null
@@ -1,580 +0,0 @@
1/*
2 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
3 * Author: Joerg Roedel <joerg.roedel@amd.com>
4 * Leo Duran <leo.duran@amd.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#ifndef _ASM_X86_AMD_IOMMU_TYPES_H
21#define _ASM_X86_AMD_IOMMU_TYPES_H
22
23#include <linux/types.h>
24#include <linux/mutex.h>
25#include <linux/list.h>
26#include <linux/spinlock.h>
27
28/*
29 * Maximum number of IOMMUs supported
30 */
31#define MAX_IOMMUS 32
32
33/*
34 * some size calculation constants
35 */
36#define DEV_TABLE_ENTRY_SIZE 32
37#define ALIAS_TABLE_ENTRY_SIZE 2
38#define RLOOKUP_TABLE_ENTRY_SIZE (sizeof(void *))
39
40/* Length of the MMIO region for the AMD IOMMU */
41#define MMIO_REGION_LENGTH 0x4000
42
43/* Capability offsets used by the driver */
44#define MMIO_CAP_HDR_OFFSET 0x00
45#define MMIO_RANGE_OFFSET 0x0c
46#define MMIO_MISC_OFFSET 0x10
47
48/* Masks, shifts and macros to parse the device range capability */
49#define MMIO_RANGE_LD_MASK 0xff000000
50#define MMIO_RANGE_FD_MASK 0x00ff0000
51#define MMIO_RANGE_BUS_MASK 0x0000ff00
52#define MMIO_RANGE_LD_SHIFT 24
53#define MMIO_RANGE_FD_SHIFT 16
54#define MMIO_RANGE_BUS_SHIFT 8
55#define MMIO_GET_LD(x) (((x) & MMIO_RANGE_LD_MASK) >> MMIO_RANGE_LD_SHIFT)
56#define MMIO_GET_FD(x) (((x) & MMIO_RANGE_FD_MASK) >> MMIO_RANGE_FD_SHIFT)
57#define MMIO_GET_BUS(x) (((x) & MMIO_RANGE_BUS_MASK) >> MMIO_RANGE_BUS_SHIFT)
58#define MMIO_MSI_NUM(x) ((x) & 0x1f)
59
60/* Flag masks for the AMD IOMMU exclusion range */
61#define MMIO_EXCL_ENABLE_MASK 0x01ULL
62#define MMIO_EXCL_ALLOW_MASK 0x02ULL
63
64/* Used offsets into the MMIO space */
65#define MMIO_DEV_TABLE_OFFSET 0x0000
66#define MMIO_CMD_BUF_OFFSET 0x0008
67#define MMIO_EVT_BUF_OFFSET 0x0010
68#define MMIO_CONTROL_OFFSET 0x0018
69#define MMIO_EXCL_BASE_OFFSET 0x0020
70#define MMIO_EXCL_LIMIT_OFFSET 0x0028
71#define MMIO_EXT_FEATURES 0x0030
72#define MMIO_CMD_HEAD_OFFSET 0x2000
73#define MMIO_CMD_TAIL_OFFSET 0x2008
74#define MMIO_EVT_HEAD_OFFSET 0x2010
75#define MMIO_EVT_TAIL_OFFSET 0x2018
76#define MMIO_STATUS_OFFSET 0x2020
77
78
79/* Extended Feature Bits */
80#define FEATURE_PREFETCH (1ULL<<0)
81#define FEATURE_PPR (1ULL<<1)
82#define FEATURE_X2APIC (1ULL<<2)
83#define FEATURE_NX (1ULL<<3)
84#define FEATURE_GT (1ULL<<4)
85#define FEATURE_IA (1ULL<<6)
86#define FEATURE_GA (1ULL<<7)
87#define FEATURE_HE (1ULL<<8)
88#define FEATURE_PC (1ULL<<9)
89
90/* MMIO status bits */
91#define MMIO_STATUS_COM_WAIT_INT_MASK 0x04
92
93/* event logging constants */
94#define EVENT_ENTRY_SIZE 0x10
95#define EVENT_TYPE_SHIFT 28
96#define EVENT_TYPE_MASK 0xf
97#define EVENT_TYPE_ILL_DEV 0x1
98#define EVENT_TYPE_IO_FAULT 0x2
99#define EVENT_TYPE_DEV_TAB_ERR 0x3
100#define EVENT_TYPE_PAGE_TAB_ERR 0x4
101#define EVENT_TYPE_ILL_CMD 0x5
102#define EVENT_TYPE_CMD_HARD_ERR 0x6
103#define EVENT_TYPE_IOTLB_INV_TO 0x7
104#define EVENT_TYPE_INV_DEV_REQ 0x8
105#define EVENT_DEVID_MASK 0xffff
106#define EVENT_DEVID_SHIFT 0
107#define EVENT_DOMID_MASK 0xffff
108#define EVENT_DOMID_SHIFT 0
109#define EVENT_FLAGS_MASK 0xfff
110#define EVENT_FLAGS_SHIFT 0x10
111
112/* feature control bits */
113#define CONTROL_IOMMU_EN 0x00ULL
114#define CONTROL_HT_TUN_EN 0x01ULL
115#define CONTROL_EVT_LOG_EN 0x02ULL
116#define CONTROL_EVT_INT_EN 0x03ULL
117#define CONTROL_COMWAIT_EN 0x04ULL
118#define CONTROL_PASSPW_EN 0x08ULL
119#define CONTROL_RESPASSPW_EN 0x09ULL
120#define CONTROL_COHERENT_EN 0x0aULL
121#define CONTROL_ISOC_EN 0x0bULL
122#define CONTROL_CMDBUF_EN 0x0cULL
123#define CONTROL_PPFLOG_EN 0x0dULL
124#define CONTROL_PPFINT_EN 0x0eULL
125
126/* command specific defines */
127#define CMD_COMPL_WAIT 0x01
128#define CMD_INV_DEV_ENTRY 0x02
129#define CMD_INV_IOMMU_PAGES 0x03
130#define CMD_INV_IOTLB_PAGES 0x04
131#define CMD_INV_ALL 0x08
132
133#define CMD_COMPL_WAIT_STORE_MASK 0x01
134#define CMD_COMPL_WAIT_INT_MASK 0x02
135#define CMD_INV_IOMMU_PAGES_SIZE_MASK 0x01
136#define CMD_INV_IOMMU_PAGES_PDE_MASK 0x02
137
138#define CMD_INV_IOMMU_ALL_PAGES_ADDRESS 0x7fffffffffffffffULL
139
140/* macros and definitions for device table entries */
141#define DEV_ENTRY_VALID 0x00
142#define DEV_ENTRY_TRANSLATION 0x01
143#define DEV_ENTRY_IR 0x3d
144#define DEV_ENTRY_IW 0x3e
145#define DEV_ENTRY_NO_PAGE_FAULT 0x62
146#define DEV_ENTRY_EX 0x67
147#define DEV_ENTRY_SYSMGT1 0x68
148#define DEV_ENTRY_SYSMGT2 0x69
149#define DEV_ENTRY_INIT_PASS 0xb8
150#define DEV_ENTRY_EINT_PASS 0xb9
151#define DEV_ENTRY_NMI_PASS 0xba
152#define DEV_ENTRY_LINT0_PASS 0xbe
153#define DEV_ENTRY_LINT1_PASS 0xbf
154#define DEV_ENTRY_MODE_MASK 0x07
155#define DEV_ENTRY_MODE_SHIFT 0x09
156
157/* constants to configure the command buffer */
158#define CMD_BUFFER_SIZE 8192
159#define CMD_BUFFER_UNINITIALIZED 1
160#define CMD_BUFFER_ENTRIES 512
161#define MMIO_CMD_SIZE_SHIFT 56
162#define MMIO_CMD_SIZE_512 (0x9ULL << MMIO_CMD_SIZE_SHIFT)
163
164/* constants for event buffer handling */
165#define EVT_BUFFER_SIZE 8192 /* 512 entries */
166#define EVT_LEN_MASK (0x9ULL << 56)
167
168#define PAGE_MODE_NONE 0x00
169#define PAGE_MODE_1_LEVEL 0x01
170#define PAGE_MODE_2_LEVEL 0x02
171#define PAGE_MODE_3_LEVEL 0x03
172#define PAGE_MODE_4_LEVEL 0x04
173#define PAGE_MODE_5_LEVEL 0x05
174#define PAGE_MODE_6_LEVEL 0x06
175
176#define PM_LEVEL_SHIFT(x) (12 + ((x) * 9))
177#define PM_LEVEL_SIZE(x) (((x) < 6) ? \
178 ((1ULL << PM_LEVEL_SHIFT((x))) - 1): \
179 (0xffffffffffffffffULL))
180#define PM_LEVEL_INDEX(x, a) (((a) >> PM_LEVEL_SHIFT((x))) & 0x1ffULL)
181#define PM_LEVEL_ENC(x) (((x) << 9) & 0xe00ULL)
182#define PM_LEVEL_PDE(x, a) ((a) | PM_LEVEL_ENC((x)) | \
183 IOMMU_PTE_P | IOMMU_PTE_IR | IOMMU_PTE_IW)
184#define PM_PTE_LEVEL(pte) (((pte) >> 9) & 0x7ULL)
185
186#define PM_MAP_4k 0
187#define PM_ADDR_MASK 0x000ffffffffff000ULL
188#define PM_MAP_MASK(lvl) (PM_ADDR_MASK & \
189 (~((1ULL << (12 + ((lvl) * 9))) - 1)))
190#define PM_ALIGNED(lvl, addr) ((PM_MAP_MASK(lvl) & (addr)) == (addr))
191
192/*
193 * Returns the page table level to use for a given page size
194 * Pagesize is expected to be a power-of-two
195 */
196#define PAGE_SIZE_LEVEL(pagesize) \
197 ((__ffs(pagesize) - 12) / 9)
198/*
199 * Returns the number of ptes to use for a given page size
200 * Pagesize is expected to be a power-of-two
201 */
202#define PAGE_SIZE_PTE_COUNT(pagesize) \
203 (1ULL << ((__ffs(pagesize) - 12) % 9))
204
205/*
206 * Aligns a given io-virtual address to a given page size
207 * Pagesize is expected to be a power-of-two
208 */
209#define PAGE_SIZE_ALIGN(address, pagesize) \
210 ((address) & ~((pagesize) - 1))
211/*
212 * Creates an IOMMU PTE for an address an a given pagesize
213 * The PTE has no permission bits set
214 * Pagesize is expected to be a power-of-two larger than 4096
215 */
216#define PAGE_SIZE_PTE(address, pagesize) \
217 (((address) | ((pagesize) - 1)) & \
218 (~(pagesize >> 1)) & PM_ADDR_MASK)
219
220/*
221 * Takes a PTE value with mode=0x07 and returns the page size it maps
222 */
223#define PTE_PAGE_SIZE(pte) \
224 (1ULL << (1 + ffz(((pte) | 0xfffULL))))
225
226#define IOMMU_PTE_P (1ULL << 0)
227#define IOMMU_PTE_TV (1ULL << 1)
228#define IOMMU_PTE_U (1ULL << 59)
229#define IOMMU_PTE_FC (1ULL << 60)
230#define IOMMU_PTE_IR (1ULL << 61)
231#define IOMMU_PTE_IW (1ULL << 62)
232
233#define DTE_FLAG_IOTLB 0x01
234
235#define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL)
236#define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_P)
237#define IOMMU_PTE_PAGE(pte) (phys_to_virt((pte) & IOMMU_PAGE_MASK))
238#define IOMMU_PTE_MODE(pte) (((pte) >> 9) & 0x07)
239
240#define IOMMU_PROT_MASK 0x03
241#define IOMMU_PROT_IR 0x01
242#define IOMMU_PROT_IW 0x02
243
244/* IOMMU capabilities */
245#define IOMMU_CAP_IOTLB 24
246#define IOMMU_CAP_NPCACHE 26
247#define IOMMU_CAP_EFR 27
248
249#define MAX_DOMAIN_ID 65536
250
251/* FIXME: move this macro to <linux/pci.h> */
252#define PCI_BUS(x) (((x) >> 8) & 0xff)
253
254/* Protection domain flags */
255#define PD_DMA_OPS_MASK (1UL << 0) /* domain used for dma_ops */
256#define PD_DEFAULT_MASK (1UL << 1) /* domain is a default dma_ops
257 domain for an IOMMU */
258#define PD_PASSTHROUGH_MASK (1UL << 2) /* domain has no page
259 translation */
260
261extern bool amd_iommu_dump;
262#define DUMP_printk(format, arg...) \
263 do { \
264 if (amd_iommu_dump) \
265 printk(KERN_INFO "AMD-Vi: " format, ## arg); \
266 } while(0);
267
268/* global flag if IOMMUs cache non-present entries */
269extern bool amd_iommu_np_cache;
270/* Only true if all IOMMUs support device IOTLBs */
271extern bool amd_iommu_iotlb_sup;
272
273/*
274 * Make iterating over all IOMMUs easier
275 */
276#define for_each_iommu(iommu) \
277 list_for_each_entry((iommu), &amd_iommu_list, list)
278#define for_each_iommu_safe(iommu, next) \
279 list_for_each_entry_safe((iommu), (next), &amd_iommu_list, list)
280
281#define APERTURE_RANGE_SHIFT 27 /* 128 MB */
282#define APERTURE_RANGE_SIZE (1ULL << APERTURE_RANGE_SHIFT)
283#define APERTURE_RANGE_PAGES (APERTURE_RANGE_SIZE >> PAGE_SHIFT)
284#define APERTURE_MAX_RANGES 32 /* allows 4GB of DMA address space */
285#define APERTURE_RANGE_INDEX(a) ((a) >> APERTURE_RANGE_SHIFT)
286#define APERTURE_PAGE_INDEX(a) (((a) >> 21) & 0x3fULL)
287
288/*
289 * This structure contains generic data for IOMMU protection domains
290 * independent of their use.
291 */
292struct protection_domain {
293 struct list_head list; /* for list of all protection domains */
294 struct list_head dev_list; /* List of all devices in this domain */
295 spinlock_t lock; /* mostly used to lock the page table*/
296 struct mutex api_lock; /* protect page tables in the iommu-api path */
297 u16 id; /* the domain id written to the device table */
298 int mode; /* paging mode (0-6 levels) */
299 u64 *pt_root; /* page table root pointer */
300 unsigned long flags; /* flags to find out type of domain */
301 bool updated; /* complete domain flush required */
302 unsigned dev_cnt; /* devices assigned to this domain */
303 unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
304 void *priv; /* private data */
305
306};
307
308/*
309 * This struct contains device specific data for the IOMMU
310 */
311struct iommu_dev_data {
312 struct list_head list; /* For domain->dev_list */
313 struct device *dev; /* Device this data belong to */
314 struct device *alias; /* The Alias Device */
315 struct protection_domain *domain; /* Domain the device is bound to */
316 atomic_t bind; /* Domain attach reverent count */
317};
318
319/*
320 * For dynamic growth the aperture size is split into ranges of 128MB of
321 * DMA address space each. This struct represents one such range.
322 */
323struct aperture_range {
324
325 /* address allocation bitmap */
326 unsigned long *bitmap;
327
328 /*
329 * Array of PTE pages for the aperture. In this array we save all the
330 * leaf pages of the domain page table used for the aperture. This way
331 * we don't need to walk the page table to find a specific PTE. We can
332 * just calculate its address in constant time.
333 */
334 u64 *pte_pages[64];
335
336 unsigned long offset;
337};
338
339/*
340 * Data container for a dma_ops specific protection domain
341 */
342struct dma_ops_domain {
343 struct list_head list;
344
345 /* generic protection domain information */
346 struct protection_domain domain;
347
348 /* size of the aperture for the mappings */
349 unsigned long aperture_size;
350
351 /* address we start to search for free addresses */
352 unsigned long next_address;
353
354 /* address space relevant data */
355 struct aperture_range *aperture[APERTURE_MAX_RANGES];
356
357 /* This will be set to true when TLB needs to be flushed */
358 bool need_flush;
359
360 /*
361 * if this is a preallocated domain, keep the device for which it was
362 * preallocated in this variable
363 */
364 u16 target_dev;
365};
366
367/*
368 * Structure where we save information about one hardware AMD IOMMU in the
369 * system.
370 */
371struct amd_iommu {
372 struct list_head list;
373
374 /* Index within the IOMMU array */
375 int index;
376
377 /* locks the accesses to the hardware */
378 spinlock_t lock;
379
380 /* Pointer to PCI device of this IOMMU */
381 struct pci_dev *dev;
382
383 /* physical address of MMIO space */
384 u64 mmio_phys;
385 /* virtual address of MMIO space */
386 u8 *mmio_base;
387
388 /* capabilities of that IOMMU read from ACPI */
389 u32 cap;
390
391 /* flags read from acpi table */
392 u8 acpi_flags;
393
394 /* Extended features */
395 u64 features;
396
397 /*
398 * Capability pointer. There could be more than one IOMMU per PCI
399 * device function if there are more than one AMD IOMMU capability
400 * pointers.
401 */
402 u16 cap_ptr;
403
404 /* pci domain of this IOMMU */
405 u16 pci_seg;
406
407 /* first device this IOMMU handles. read from PCI */
408 u16 first_device;
409 /* last device this IOMMU handles. read from PCI */
410 u16 last_device;
411
412 /* start of exclusion range of that IOMMU */
413 u64 exclusion_start;
414 /* length of exclusion range of that IOMMU */
415 u64 exclusion_length;
416
417 /* command buffer virtual address */
418 u8 *cmd_buf;
419 /* size of command buffer */
420 u32 cmd_buf_size;
421
422 /* size of event buffer */
423 u32 evt_buf_size;
424 /* event buffer virtual address */
425 u8 *evt_buf;
426 /* MSI number for event interrupt */
427 u16 evt_msi_num;
428
429 /* true if interrupts for this IOMMU are already enabled */
430 bool int_enabled;
431
432 /* if one, we need to send a completion wait command */
433 bool need_sync;
434
435 /* default dma_ops domain for that IOMMU */
436 struct dma_ops_domain *default_dom;
437
438 /*
439 * We can't rely on the BIOS to restore all values on reinit, so we
440 * need to stash them
441 */
442
443 /* The iommu BAR */
444 u32 stored_addr_lo;
445 u32 stored_addr_hi;
446
447 /*
448 * Each iommu has 6 l1s, each of which is documented as having 0x12
449 * registers
450 */
451 u32 stored_l1[6][0x12];
452
453 /* The l2 indirect registers */
454 u32 stored_l2[0x83];
455};
456
457/*
458 * List with all IOMMUs in the system. This list is not locked because it is
459 * only written and read at driver initialization or suspend time
460 */
461extern struct list_head amd_iommu_list;
462
463/*
464 * Array with pointers to each IOMMU struct
465 * The indices are referenced in the protection domains
466 */
467extern struct amd_iommu *amd_iommus[MAX_IOMMUS];
468
469/* Number of IOMMUs present in the system */
470extern int amd_iommus_present;
471
472/*
473 * Declarations for the global list of all protection domains
474 */
475extern spinlock_t amd_iommu_pd_lock;
476extern struct list_head amd_iommu_pd_list;
477
478/*
479 * Structure defining one entry in the device table
480 */
481struct dev_table_entry {
482 u32 data[8];
483};
484
485/*
486 * One entry for unity mappings parsed out of the ACPI table.
487 */
488struct unity_map_entry {
489 struct list_head list;
490
491 /* starting device id this entry is used for (including) */
492 u16 devid_start;
493 /* end device id this entry is used for (including) */
494 u16 devid_end;
495
496 /* start address to unity map (including) */
497 u64 address_start;
498 /* end address to unity map (including) */
499 u64 address_end;
500
501 /* required protection */
502 int prot;
503};
504
505/*
506 * List of all unity mappings. It is not locked because as runtime it is only
507 * read. It is created at ACPI table parsing time.
508 */
509extern struct list_head amd_iommu_unity_map;
510
511/*
512 * Data structures for device handling
513 */
514
515/*
516 * Device table used by hardware. Read and write accesses by software are
517 * locked with the amd_iommu_pd_table lock.
518 */
519extern struct dev_table_entry *amd_iommu_dev_table;
520
521/*
522 * Alias table to find requestor ids to device ids. Not locked because only
523 * read on runtime.
524 */
525extern u16 *amd_iommu_alias_table;
526
527/*
528 * Reverse lookup table to find the IOMMU which translates a specific device.
529 */
530extern struct amd_iommu **amd_iommu_rlookup_table;
531
532/* size of the dma_ops aperture as power of 2 */
533extern unsigned amd_iommu_aperture_order;
534
535/* largest PCI device id we expect translation requests for */
536extern u16 amd_iommu_last_bdf;
537
538/* allocation bitmap for domain ids */
539extern unsigned long *amd_iommu_pd_alloc_bitmap;
540
541/*
542 * If true, the addresses will be flushed on unmap time, not when
543 * they are reused
544 */
545extern bool amd_iommu_unmap_flush;
546
547/* takes bus and device/function and returns the device id
548 * FIXME: should that be in generic PCI code? */
549static inline u16 calc_devid(u8 bus, u8 devfn)
550{
551 return (((u16)bus) << 8) | devfn;
552}
553
554#ifdef CONFIG_AMD_IOMMU_STATS
555
556struct __iommu_counter {
557 char *name;
558 struct dentry *dent;
559 u64 value;
560};
561
562#define DECLARE_STATS_COUNTER(nm) \
563 static struct __iommu_counter nm = { \
564 .name = #nm, \
565 }
566
567#define INC_STATS_COUNTER(name) name.value += 1
568#define ADD_STATS_COUNTER(name, x) name.value += (x)
569#define SUB_STATS_COUNTER(name, x) name.value -= (x)
570
571#else /* CONFIG_AMD_IOMMU_STATS */
572
573#define DECLARE_STATS_COUNTER(name)
574#define INC_STATS_COUNTER(name)
575#define ADD_STATS_COUNTER(name, x)
576#define SUB_STATS_COUNTER(name, x)
577
578#endif /* CONFIG_AMD_IOMMU_STATS */
579
580#endif /* _ASM_X86_AMD_IOMMU_TYPES_H */
diff --git a/arch/x86/include/asm/apb_timer.h b/arch/x86/include/asm/apb_timer.h
index af60d8a2e288..0acbac299e49 100644
--- a/arch/x86/include/asm/apb_timer.h
+++ b/arch/x86/include/asm/apb_timer.h
@@ -18,24 +18,6 @@
18 18
19#ifdef CONFIG_APB_TIMER 19#ifdef CONFIG_APB_TIMER
20 20
21/* Langwell DW APB timer registers */
22#define APBTMR_N_LOAD_COUNT 0x00
23#define APBTMR_N_CURRENT_VALUE 0x04
24#define APBTMR_N_CONTROL 0x08
25#define APBTMR_N_EOI 0x0c
26#define APBTMR_N_INT_STATUS 0x10
27
28#define APBTMRS_INT_STATUS 0xa0
29#define APBTMRS_EOI 0xa4
30#define APBTMRS_RAW_INT_STATUS 0xa8
31#define APBTMRS_COMP_VERSION 0xac
32#define APBTMRS_REG_SIZE 0x14
33
34/* register bits */
35#define APBTMR_CONTROL_ENABLE (1<<0)
36#define APBTMR_CONTROL_MODE_PERIODIC (1<<1) /*1: periodic 0:free running */
37#define APBTMR_CONTROL_INT (1<<2)
38
39/* default memory mapped register base */ 21/* default memory mapped register base */
40#define LNW_SCU_ADDR 0xFF100000 22#define LNW_SCU_ADDR 0xFF100000
41#define LNW_EXT_TIMER_OFFSET 0x1B800 23#define LNW_EXT_TIMER_OFFSET 0x1B800
@@ -43,14 +25,13 @@
43#define LNW_EXT_TIMER_PGOFFSET 0x800 25#define LNW_EXT_TIMER_PGOFFSET 0x800
44 26
45/* APBT clock speed range from PCLK to fabric base, 25-100MHz */ 27/* APBT clock speed range from PCLK to fabric base, 25-100MHz */
46#define APBT_MAX_FREQ 50 28#define APBT_MAX_FREQ 50000000
47#define APBT_MIN_FREQ 1 29#define APBT_MIN_FREQ 1000000
48#define APBT_MMAP_SIZE 1024 30#define APBT_MMAP_SIZE 1024
49 31
50#define APBT_DEV_USED 1 32#define APBT_DEV_USED 1
51 33
52extern void apbt_time_init(void); 34extern void apbt_time_init(void);
53extern struct clock_event_device *global_clock_event;
54extern unsigned long apbt_quick_calibrate(void); 35extern unsigned long apbt_quick_calibrate(void);
55extern int arch_setup_apbt_irqs(int irq, int trigger, int mask, int cpu); 36extern int arch_setup_apbt_irqs(int irq, int trigger, int mask, int cpu);
56extern void apbt_setup_secondary_clock(void); 37extern void apbt_setup_secondary_clock(void);
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index b3ed1e1460ff..9412d6558c88 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -3,9 +3,11 @@
3 3
4#ifdef __ASSEMBLY__ 4#ifdef __ASSEMBLY__
5# define __ASM_FORM(x) x 5# define __ASM_FORM(x) x
6# define __ASM_FORM_COMMA(x) x,
6# define __ASM_EX_SEC .section __ex_table, "a" 7# define __ASM_EX_SEC .section __ex_table, "a"
7#else 8#else
8# define __ASM_FORM(x) " " #x " " 9# define __ASM_FORM(x) " " #x " "
10# define __ASM_FORM_COMMA(x) " " #x ","
9# define __ASM_EX_SEC " .section __ex_table,\"a\"\n" 11# define __ASM_EX_SEC " .section __ex_table,\"a\"\n"
10#endif 12#endif
11 13
@@ -15,7 +17,8 @@
15# define __ASM_SEL(a,b) __ASM_FORM(b) 17# define __ASM_SEL(a,b) __ASM_FORM(b)
16#endif 18#endif
17 19
18#define __ASM_SIZE(inst) __ASM_SEL(inst##l, inst##q) 20#define __ASM_SIZE(inst, ...) __ASM_SEL(inst##l##__VA_ARGS__, \
21 inst##q##__VA_ARGS__)
19#define __ASM_REG(reg) __ASM_SEL(e##reg, r##reg) 22#define __ASM_REG(reg) __ASM_SEL(e##reg, r##reg)
20 23
21#define _ASM_PTR __ASM_SEL(.long, .quad) 24#define _ASM_PTR __ASM_SEL(.long, .quad)
diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
index 30af5a832163..a9e3a740f697 100644
--- a/arch/x86/include/asm/calling.h
+++ b/arch/x86/include/asm/calling.h
@@ -46,6 +46,7 @@ For 32-bit we have the following conventions - kernel is built with
46 46
47*/ 47*/
48 48
49#include "dwarf2.h"
49 50
50/* 51/*
51 * 64-bit system call stack frame layout defines and helpers, for 52 * 64-bit system call stack frame layout defines and helpers, for
@@ -84,72 +85,57 @@ For 32-bit we have the following conventions - kernel is built with
84#define ARGOFFSET R11 85#define ARGOFFSET R11
85#define SWFRAME ORIG_RAX 86#define SWFRAME ORIG_RAX
86 87
87 .macro SAVE_ARGS addskip=0, norcx=0, nor891011=0 88 .macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1
88 subq $9*8+\addskip, %rsp 89 subq $9*8+\addskip, %rsp
89 CFI_ADJUST_CFA_OFFSET 9*8+\addskip 90 CFI_ADJUST_CFA_OFFSET 9*8+\addskip
90 movq %rdi, 8*8(%rsp) 91 movq_cfi rdi, 8*8
91 CFI_REL_OFFSET rdi, 8*8 92 movq_cfi rsi, 7*8
92 movq %rsi, 7*8(%rsp) 93 movq_cfi rdx, 6*8
93 CFI_REL_OFFSET rsi, 7*8 94
94 movq %rdx, 6*8(%rsp) 95 .if \save_rcx
95 CFI_REL_OFFSET rdx, 6*8 96 movq_cfi rcx, 5*8
96 .if \norcx
97 .else
98 movq %rcx, 5*8(%rsp)
99 CFI_REL_OFFSET rcx, 5*8
100 .endif 97 .endif
101 movq %rax, 4*8(%rsp) 98
102 CFI_REL_OFFSET rax, 4*8 99 movq_cfi rax, 4*8
103 .if \nor891011 100
104 .else 101 .if \save_r891011
105 movq %r8, 3*8(%rsp) 102 movq_cfi r8, 3*8
106 CFI_REL_OFFSET r8, 3*8 103 movq_cfi r9, 2*8
107 movq %r9, 2*8(%rsp) 104 movq_cfi r10, 1*8
108 CFI_REL_OFFSET r9, 2*8 105 movq_cfi r11, 0*8
109 movq %r10, 1*8(%rsp)
110 CFI_REL_OFFSET r10, 1*8
111 movq %r11, (%rsp)
112 CFI_REL_OFFSET r11, 0*8
113 .endif 106 .endif
107
114 .endm 108 .endm
115 109
116#define ARG_SKIP (9*8) 110#define ARG_SKIP (9*8)
117 111
118 .macro RESTORE_ARGS skiprax=0, addskip=0, skiprcx=0, skipr11=0, \ 112 .macro RESTORE_ARGS rstor_rax=1, addskip=0, rstor_rcx=1, rstor_r11=1, \
119 skipr8910=0, skiprdx=0 113 rstor_r8910=1, rstor_rdx=1
120 .if \skipr11 114 .if \rstor_r11
121 .else 115 movq_cfi_restore 0*8, r11
122 movq (%rsp), %r11
123 CFI_RESTORE r11
124 .endif 116 .endif
125 .if \skipr8910 117
126 .else 118 .if \rstor_r8910
127 movq 1*8(%rsp), %r10 119 movq_cfi_restore 1*8, r10
128 CFI_RESTORE r10 120 movq_cfi_restore 2*8, r9
129 movq 2*8(%rsp), %r9 121 movq_cfi_restore 3*8, r8
130 CFI_RESTORE r9
131 movq 3*8(%rsp), %r8
132 CFI_RESTORE r8
133 .endif 122 .endif
134 .if \skiprax 123
135 .else 124 .if \rstor_rax
136 movq 4*8(%rsp), %rax 125 movq_cfi_restore 4*8, rax
137 CFI_RESTORE rax
138 .endif 126 .endif
139 .if \skiprcx 127
140 .else 128 .if \rstor_rcx
141 movq 5*8(%rsp), %rcx 129 movq_cfi_restore 5*8, rcx
142 CFI_RESTORE rcx
143 .endif 130 .endif
144 .if \skiprdx 131
145 .else 132 .if \rstor_rdx
146 movq 6*8(%rsp), %rdx 133 movq_cfi_restore 6*8, rdx
147 CFI_RESTORE rdx
148 .endif 134 .endif
149 movq 7*8(%rsp), %rsi 135
150 CFI_RESTORE rsi 136 movq_cfi_restore 7*8, rsi
151 movq 8*8(%rsp), %rdi 137 movq_cfi_restore 8*8, rdi
152 CFI_RESTORE rdi 138
153 .if ARG_SKIP+\addskip > 0 139 .if ARG_SKIP+\addskip > 0
154 addq $ARG_SKIP+\addskip, %rsp 140 addq $ARG_SKIP+\addskip, %rsp
155 CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip) 141 CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
@@ -176,33 +162,21 @@ For 32-bit we have the following conventions - kernel is built with
176 .macro SAVE_REST 162 .macro SAVE_REST
177 subq $REST_SKIP, %rsp 163 subq $REST_SKIP, %rsp
178 CFI_ADJUST_CFA_OFFSET REST_SKIP 164 CFI_ADJUST_CFA_OFFSET REST_SKIP
179 movq %rbx, 5*8(%rsp) 165 movq_cfi rbx, 5*8
180 CFI_REL_OFFSET rbx, 5*8 166 movq_cfi rbp, 4*8
181 movq %rbp, 4*8(%rsp) 167 movq_cfi r12, 3*8
182 CFI_REL_OFFSET rbp, 4*8 168 movq_cfi r13, 2*8
183 movq %r12, 3*8(%rsp) 169 movq_cfi r14, 1*8
184 CFI_REL_OFFSET r12, 3*8 170 movq_cfi r15, 0*8
185 movq %r13, 2*8(%rsp)
186 CFI_REL_OFFSET r13, 2*8
187 movq %r14, 1*8(%rsp)
188 CFI_REL_OFFSET r14, 1*8
189 movq %r15, (%rsp)
190 CFI_REL_OFFSET r15, 0*8
191 .endm 171 .endm
192 172
193 .macro RESTORE_REST 173 .macro RESTORE_REST
194 movq (%rsp), %r15 174 movq_cfi_restore 0*8, r15
195 CFI_RESTORE r15 175 movq_cfi_restore 1*8, r14
196 movq 1*8(%rsp), %r14 176 movq_cfi_restore 2*8, r13
197 CFI_RESTORE r14 177 movq_cfi_restore 3*8, r12
198 movq 2*8(%rsp), %r13 178 movq_cfi_restore 4*8, rbp
199 CFI_RESTORE r13 179 movq_cfi_restore 5*8, rbx
200 movq 3*8(%rsp), %r12
201 CFI_RESTORE r12
202 movq 4*8(%rsp), %rbp
203 CFI_RESTORE rbp
204 movq 5*8(%rsp), %rbx
205 CFI_RESTORE rbx
206 addq $REST_SKIP, %rsp 180 addq $REST_SKIP, %rsp
207 CFI_ADJUST_CFA_OFFSET -(REST_SKIP) 181 CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
208 .endm 182 .endm
@@ -214,7 +188,7 @@ For 32-bit we have the following conventions - kernel is built with
214 188
215 .macro RESTORE_ALL addskip=0 189 .macro RESTORE_ALL addskip=0
216 RESTORE_REST 190 RESTORE_REST
217 RESTORE_ARGS 0, \addskip 191 RESTORE_ARGS 1, \addskip
218 .endm 192 .endm
219 193
220 .macro icebp 194 .macro icebp
diff --git a/arch/x86/include/asm/clocksource.h b/arch/x86/include/asm/clocksource.h
new file mode 100644
index 000000000000..0bdbbb3b9ce7
--- /dev/null
+++ b/arch/x86/include/asm/clocksource.h
@@ -0,0 +1,18 @@
1/* x86-specific clocksource additions */
2
3#ifndef _ASM_X86_CLOCKSOURCE_H
4#define _ASM_X86_CLOCKSOURCE_H
5
6#ifdef CONFIG_X86_64
7
8#define VCLOCK_NONE 0 /* No vDSO clock available. */
9#define VCLOCK_TSC 1 /* vDSO should use vread_tsc. */
10#define VCLOCK_HPET 2 /* vDSO should use vread_hpet. */
11
12struct arch_clocksource_data {
13 int vclock_mode;
14};
15
16#endif /* CONFIG_X86_64 */
17
18#endif /* _ASM_X86_CLOCKSOURCE_H */
diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h
index 284a6e8f7ce1..3deb7250624c 100644
--- a/arch/x86/include/asm/cmpxchg_32.h
+++ b/arch/x86/include/asm/cmpxchg_32.h
@@ -280,4 +280,52 @@ static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
280 280
281#endif 281#endif
282 282
283#define cmpxchg8b(ptr, o1, o2, n1, n2) \
284({ \
285 char __ret; \
286 __typeof__(o2) __dummy; \
287 __typeof__(*(ptr)) __old1 = (o1); \
288 __typeof__(o2) __old2 = (o2); \
289 __typeof__(*(ptr)) __new1 = (n1); \
290 __typeof__(o2) __new2 = (n2); \
291 asm volatile(LOCK_PREFIX "cmpxchg8b %2; setz %1" \
292 : "=d"(__dummy), "=a" (__ret), "+m" (*ptr)\
293 : "a" (__old1), "d"(__old2), \
294 "b" (__new1), "c" (__new2) \
295 : "memory"); \
296 __ret; })
297
298
299#define cmpxchg8b_local(ptr, o1, o2, n1, n2) \
300({ \
301 char __ret; \
302 __typeof__(o2) __dummy; \
303 __typeof__(*(ptr)) __old1 = (o1); \
304 __typeof__(o2) __old2 = (o2); \
305 __typeof__(*(ptr)) __new1 = (n1); \
306 __typeof__(o2) __new2 = (n2); \
307 asm volatile("cmpxchg8b %2; setz %1" \
308 : "=d"(__dummy), "=a"(__ret), "+m" (*ptr)\
 309	       : "a" (__old1), "d"(__old2),	\
 310		 "b" (__new1), "c" (__new2)	\
 311	       : "memory");			\
312 __ret; })
313
314
315#define cmpxchg_double(ptr, o1, o2, n1, n2) \
316({ \
317 BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
318 VM_BUG_ON((unsigned long)(ptr) % 8); \
319 cmpxchg8b((ptr), (o1), (o2), (n1), (n2)); \
320})
321
322#define cmpxchg_double_local(ptr, o1, o2, n1, n2) \
323({ \
324 BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
325 VM_BUG_ON((unsigned long)(ptr) % 8); \
 326	cmpxchg8b_local((ptr), (o1), (o2), (n1), (n2));			\
327})
328
329#define system_has_cmpxchg_double() cpu_has_cx8
330
283#endif /* _ASM_X86_CMPXCHG_32_H */ 331#endif /* _ASM_X86_CMPXCHG_32_H */
diff --git a/arch/x86/include/asm/cmpxchg_64.h b/arch/x86/include/asm/cmpxchg_64.h
index 423ae58aa020..7cf5c0a24434 100644
--- a/arch/x86/include/asm/cmpxchg_64.h
+++ b/arch/x86/include/asm/cmpxchg_64.h
@@ -151,4 +151,49 @@ extern void __cmpxchg_wrong_size(void);
151 cmpxchg_local((ptr), (o), (n)); \ 151 cmpxchg_local((ptr), (o), (n)); \
152}) 152})
153 153
154#define cmpxchg16b(ptr, o1, o2, n1, n2) \
155({ \
156 char __ret; \
157 __typeof__(o2) __junk; \
158 __typeof__(*(ptr)) __old1 = (o1); \
159 __typeof__(o2) __old2 = (o2); \
160 __typeof__(*(ptr)) __new1 = (n1); \
161 __typeof__(o2) __new2 = (n2); \
162 asm volatile(LOCK_PREFIX "cmpxchg16b %2;setz %1" \
163 : "=d"(__junk), "=a"(__ret), "+m" (*ptr) \
164 : "b"(__new1), "c"(__new2), \
165 "a"(__old1), "d"(__old2)); \
166 __ret; })
167
168
169#define cmpxchg16b_local(ptr, o1, o2, n1, n2) \
170({ \
171 char __ret; \
172 __typeof__(o2) __junk; \
173 __typeof__(*(ptr)) __old1 = (o1); \
174 __typeof__(o2) __old2 = (o2); \
175 __typeof__(*(ptr)) __new1 = (n1); \
176 __typeof__(o2) __new2 = (n2); \
177 asm volatile("cmpxchg16b %2;setz %1" \
178 : "=d"(__junk), "=a"(__ret), "+m" (*ptr) \
179 : "b"(__new1), "c"(__new2), \
180 "a"(__old1), "d"(__old2)); \
181 __ret; })
182
183#define cmpxchg_double(ptr, o1, o2, n1, n2) \
184({ \
185 BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
186 VM_BUG_ON((unsigned long)(ptr) % 16); \
187 cmpxchg16b((ptr), (o1), (o2), (n1), (n2)); \
188})
189
190#define cmpxchg_double_local(ptr, o1, o2, n1, n2) \
191({ \
192 BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
193 VM_BUG_ON((unsigned long)(ptr) % 16); \
194 cmpxchg16b_local((ptr), (o1), (o2), (n1), (n2)); \
195})
196
197#define system_has_cmpxchg_double() cpu_has_cx16
198
154#endif /* _ASM_X86_CMPXCHG_64_H */ 199#endif /* _ASM_X86_CMPXCHG_64_H */
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 71cc3800712c..4258aac99a6e 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -288,6 +288,8 @@ extern const char * const x86_power_flags[32];
288#define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR) 288#define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR)
289#define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ) 289#define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ)
290#define cpu_has_perfctr_core boot_cpu_has(X86_FEATURE_PERFCTR_CORE) 290#define cpu_has_perfctr_core boot_cpu_has(X86_FEATURE_PERFCTR_CORE)
291#define cpu_has_cx8 boot_cpu_has(X86_FEATURE_CX8)
292#define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16)
291 293
292#if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64) 294#if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64)
293# define cpu_has_invlpg 1 295# define cpu_has_invlpg 1
@@ -331,8 +333,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
331 "2:\n" 333 "2:\n"
332 ".section .altinstructions,\"a\"\n" 334 ".section .altinstructions,\"a\"\n"
333 _ASM_ALIGN "\n" 335 _ASM_ALIGN "\n"
334 _ASM_PTR "1b\n" 336 " .long 1b - .\n"
335 _ASM_PTR "0\n" /* no replacement */ 337 " .long 0\n" /* no replacement */
336 " .word %P0\n" /* feature bit */ 338 " .word %P0\n" /* feature bit */
337 " .byte 2b - 1b\n" /* source len */ 339 " .byte 2b - 1b\n" /* source len */
338 " .byte 0\n" /* replacement len */ 340 " .byte 0\n" /* replacement len */
@@ -349,8 +351,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
349 "2:\n" 351 "2:\n"
350 ".section .altinstructions,\"a\"\n" 352 ".section .altinstructions,\"a\"\n"
351 _ASM_ALIGN "\n" 353 _ASM_ALIGN "\n"
352 _ASM_PTR "1b\n" 354 " .long 1b - .\n"
353 _ASM_PTR "3f\n" 355 " .long 3f - .\n"
354 " .word %P1\n" /* feature bit */ 356 " .word %P1\n" /* feature bit */
355 " .byte 2b - 1b\n" /* source len */ 357 " .byte 2b - 1b\n" /* source len */
356 " .byte 4f - 3f\n" /* replacement len */ 358 " .byte 4f - 3f\n" /* replacement len */
diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h
index 1cd6d26a0a8d..0baa628e330c 100644
--- a/arch/x86/include/asm/entry_arch.h
+++ b/arch/x86/include/asm/entry_arch.h
@@ -53,8 +53,4 @@ BUILD_INTERRUPT(thermal_interrupt,THERMAL_APIC_VECTOR)
53BUILD_INTERRUPT(threshold_interrupt,THRESHOLD_APIC_VECTOR) 53BUILD_INTERRUPT(threshold_interrupt,THRESHOLD_APIC_VECTOR)
54#endif 54#endif
55 55
56#ifdef CONFIG_X86_MCE
57BUILD_INTERRUPT(mce_self_interrupt,MCE_SELF_VECTOR)
58#endif
59
60#endif 56#endif
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 4729b2b63117..460c74e4852c 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -78,6 +78,7 @@ enum fixed_addresses {
78 VSYSCALL_LAST_PAGE, 78 VSYSCALL_LAST_PAGE,
79 VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE 79 VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE
80 + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1, 80 + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1,
81 VVAR_PAGE,
81 VSYSCALL_HPET, 82 VSYSCALL_HPET,
82#endif 83#endif
83 FIX_DBGP_BASE, 84 FIX_DBGP_BASE,
diff --git a/arch/x86/include/asm/frame.h b/arch/x86/include/asm/frame.h
index 2c6fc9e62812..3b629f47eb65 100644
--- a/arch/x86/include/asm/frame.h
+++ b/arch/x86/include/asm/frame.h
@@ -1,5 +1,6 @@
1#ifdef __ASSEMBLY__ 1#ifdef __ASSEMBLY__
2 2
3#include <asm/asm.h>
3#include <asm/dwarf2.h> 4#include <asm/dwarf2.h>
4 5
5/* The annotation hides the frame from the unwinder and makes it look 6/* The annotation hides the frame from the unwinder and makes it look
@@ -7,13 +8,13 @@
7 frame pointer later */ 8 frame pointer later */
8#ifdef CONFIG_FRAME_POINTER 9#ifdef CONFIG_FRAME_POINTER
9 .macro FRAME 10 .macro FRAME
10 pushl_cfi %ebp 11 __ASM_SIZE(push,_cfi) %__ASM_REG(bp)
11 CFI_REL_OFFSET ebp,0 12 CFI_REL_OFFSET __ASM_REG(bp), 0
12 movl %esp,%ebp 13 __ASM_SIZE(mov) %__ASM_REG(sp), %__ASM_REG(bp)
13 .endm 14 .endm
14 .macro ENDFRAME 15 .macro ENDFRAME
15 popl_cfi %ebp 16 __ASM_SIZE(pop,_cfi) %__ASM_REG(bp)
16 CFI_RESTORE ebp 17 CFI_RESTORE __ASM_REG(bp)
17 .endm 18 .endm
18#else 19#else
19 .macro FRAME 20 .macro FRAME
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index bb9efe8706e2..13f5504c76c0 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -34,7 +34,6 @@ extern void irq_work_interrupt(void);
34extern void spurious_interrupt(void); 34extern void spurious_interrupt(void);
35extern void thermal_interrupt(void); 35extern void thermal_interrupt(void);
36extern void reschedule_interrupt(void); 36extern void reschedule_interrupt(void);
37extern void mce_self_interrupt(void);
38 37
39extern void invalidate_interrupt(void); 38extern void invalidate_interrupt(void);
40extern void invalidate_interrupt0(void); 39extern void invalidate_interrupt0(void);
diff --git a/arch/x86/include/asm/i8253.h b/arch/x86/include/asm/i8253.h
deleted file mode 100644
index 65aaa91d5850..000000000000
--- a/arch/x86/include/asm/i8253.h
+++ /dev/null
@@ -1,20 +0,0 @@
1#ifndef _ASM_X86_I8253_H
2#define _ASM_X86_I8253_H
3
4/* i8253A PIT registers */
5#define PIT_MODE 0x43
6#define PIT_CH0 0x40
7#define PIT_CH2 0x42
8
9#define PIT_LATCH LATCH
10
11extern raw_spinlock_t i8253_lock;
12
13extern struct clock_event_device *global_clock_event;
14
15extern void setup_pit_timer(void);
16
17#define inb_pit inb_p
18#define outb_pit outb_p
19
20#endif /* _ASM_X86_I8253_H */
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 6e976ee3b3ef..f9a320984a10 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -17,7 +17,8 @@
17 * Vectors 0 ... 31 : system traps and exceptions - hardcoded events 17 * Vectors 0 ... 31 : system traps and exceptions - hardcoded events
18 * Vectors 32 ... 127 : device interrupts 18 * Vectors 32 ... 127 : device interrupts
19 * Vector 128 : legacy int80 syscall interface 19 * Vector 128 : legacy int80 syscall interface
20 * Vectors 129 ... INVALIDATE_TLB_VECTOR_START-1 : device interrupts 20 * Vector 204 : legacy x86_64 vsyscall emulation
21 * Vectors 129 ... INVALIDATE_TLB_VECTOR_START-1 except 204 : device interrupts
21 * Vectors INVALIDATE_TLB_VECTOR_START ... 255 : special interrupts 22 * Vectors INVALIDATE_TLB_VECTOR_START ... 255 : special interrupts
22 * 23 *
23 * 64-bit x86 has per CPU IDT tables, 32-bit has one shared IDT table. 24 * 64-bit x86 has per CPU IDT tables, 32-bit has one shared IDT table.
@@ -50,6 +51,9 @@
50#ifdef CONFIG_X86_32 51#ifdef CONFIG_X86_32
51# define SYSCALL_VECTOR 0x80 52# define SYSCALL_VECTOR 0x80
52#endif 53#endif
54#ifdef CONFIG_X86_64
55# define VSYSCALL_EMU_VECTOR 0xcc
56#endif
53 57
54/* 58/*
55 * Vectors 0x30-0x3f are used for ISA interrupts. 59 * Vectors 0x30-0x3f are used for ISA interrupts.
@@ -109,11 +113,6 @@
109 113
110#define UV_BAU_MESSAGE 0xf5 114#define UV_BAU_MESSAGE 0xf5
111 115
112/*
113 * Self IPI vector for machine checks
114 */
115#define MCE_SELF_VECTOR 0xf4
116
117/* Xen vector callback to receive events in a HVM domain */ 116/* Xen vector callback to receive events in a HVM domain */
118#define XEN_HVM_EVTCHN_CALLBACK 0xf3 117#define XEN_HVM_EVTCHN_CALLBACK 0xf3
119 118
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
index 5745ce8bf108..bba3cf88e624 100644
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -60,23 +60,24 @@ static inline void native_halt(void)
60#include <asm/paravirt.h> 60#include <asm/paravirt.h>
61#else 61#else
62#ifndef __ASSEMBLY__ 62#ifndef __ASSEMBLY__
63#include <linux/types.h>
63 64
64static inline unsigned long arch_local_save_flags(void) 65static inline notrace unsigned long arch_local_save_flags(void)
65{ 66{
66 return native_save_fl(); 67 return native_save_fl();
67} 68}
68 69
69static inline void arch_local_irq_restore(unsigned long flags) 70static inline notrace void arch_local_irq_restore(unsigned long flags)
70{ 71{
71 native_restore_fl(flags); 72 native_restore_fl(flags);
72} 73}
73 74
74static inline void arch_local_irq_disable(void) 75static inline notrace void arch_local_irq_disable(void)
75{ 76{
76 native_irq_disable(); 77 native_irq_disable();
77} 78}
78 79
79static inline void arch_local_irq_enable(void) 80static inline notrace void arch_local_irq_enable(void)
80{ 81{
81 native_irq_enable(); 82 native_irq_enable();
82} 83}
@@ -102,7 +103,7 @@ static inline void halt(void)
102/* 103/*
103 * For spinlocks, etc: 104 * For spinlocks, etc:
104 */ 105 */
105static inline unsigned long arch_local_irq_save(void) 106static inline notrace unsigned long arch_local_irq_save(void)
106{ 107{
107 unsigned long flags = arch_local_save_flags(); 108 unsigned long flags = arch_local_save_flags();
108 arch_local_irq_disable(); 109 arch_local_irq_disable();
diff --git a/arch/x86/include/asm/lguest_hcall.h b/arch/x86/include/asm/lguest_hcall.h
index b60f2924c413..879fd7d33877 100644
--- a/arch/x86/include/asm/lguest_hcall.h
+++ b/arch/x86/include/asm/lguest_hcall.h
@@ -61,6 +61,7 @@ hcall(unsigned long call,
61 : "memory"); 61 : "memory");
62 return call; 62 return call;
63} 63}
64/*:*/
64 65
65/* Can't use our min() macro here: needs to be a constant */ 66/* Can't use our min() macro here: needs to be a constant */
66#define LGUEST_IRQS (NR_IRQS < 32 ? NR_IRQS: 32) 67#define LGUEST_IRQS (NR_IRQS < 32 ? NR_IRQS: 32)
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 021979a6e23f..716b48af7863 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -8,6 +8,7 @@
8 * Machine Check support for x86 8 * Machine Check support for x86
9 */ 9 */
10 10
11/* MCG_CAP register defines */
11#define MCG_BANKCNT_MASK 0xff /* Number of Banks */ 12#define MCG_BANKCNT_MASK 0xff /* Number of Banks */
12#define MCG_CTL_P (1ULL<<8) /* MCG_CTL register available */ 13#define MCG_CTL_P (1ULL<<8) /* MCG_CTL register available */
13#define MCG_EXT_P (1ULL<<9) /* Extended registers available */ 14#define MCG_EXT_P (1ULL<<9) /* Extended registers available */
@@ -17,10 +18,12 @@
17#define MCG_EXT_CNT(c) (((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT) 18#define MCG_EXT_CNT(c) (((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT)
18#define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */ 19#define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */
19 20
21/* MCG_STATUS register defines */
20#define MCG_STATUS_RIPV (1ULL<<0) /* restart ip valid */ 22#define MCG_STATUS_RIPV (1ULL<<0) /* restart ip valid */
21#define MCG_STATUS_EIPV (1ULL<<1) /* ip points to correct instruction */ 23#define MCG_STATUS_EIPV (1ULL<<1) /* ip points to correct instruction */
22#define MCG_STATUS_MCIP (1ULL<<2) /* machine check in progress */ 24#define MCG_STATUS_MCIP (1ULL<<2) /* machine check in progress */
23 25
26/* MCi_STATUS register defines */
24#define MCI_STATUS_VAL (1ULL<<63) /* valid error */ 27#define MCI_STATUS_VAL (1ULL<<63) /* valid error */
25#define MCI_STATUS_OVER (1ULL<<62) /* previous errors lost */ 28#define MCI_STATUS_OVER (1ULL<<62) /* previous errors lost */
26#define MCI_STATUS_UC (1ULL<<61) /* uncorrected error */ 29#define MCI_STATUS_UC (1ULL<<61) /* uncorrected error */
@@ -31,12 +34,14 @@
31#define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */ 34#define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */
32#define MCI_STATUS_AR (1ULL<<55) /* Action required */ 35#define MCI_STATUS_AR (1ULL<<55) /* Action required */
33 36
34/* MISC register defines */ 37/* MCi_MISC register defines */
35#define MCM_ADDR_SEGOFF 0 /* segment offset */ 38#define MCI_MISC_ADDR_LSB(m) ((m) & 0x3f)
36#define MCM_ADDR_LINEAR 1 /* linear address */ 39#define MCI_MISC_ADDR_MODE(m) (((m) >> 6) & 7)
37#define MCM_ADDR_PHYS 2 /* physical address */ 40#define MCI_MISC_ADDR_SEGOFF 0 /* segment offset */
38#define MCM_ADDR_MEM 3 /* memory address */ 41#define MCI_MISC_ADDR_LINEAR 1 /* linear address */
39#define MCM_ADDR_GENERIC 7 /* generic */ 42#define MCI_MISC_ADDR_PHYS 2 /* physical address */
43#define MCI_MISC_ADDR_MEM 3 /* memory address */
44#define MCI_MISC_ADDR_GENERIC 7 /* generic */
40 45
41/* CTL2 register defines */ 46/* CTL2 register defines */
42#define MCI_CTL2_CMCI_EN (1ULL << 30) 47#define MCI_CTL2_CMCI_EN (1ULL << 30)
@@ -144,7 +149,7 @@ static inline void enable_p5_mce(void) {}
144 149
145void mce_setup(struct mce *m); 150void mce_setup(struct mce *m);
146void mce_log(struct mce *m); 151void mce_log(struct mce *m);
147DECLARE_PER_CPU(struct sys_device, mce_dev); 152DECLARE_PER_CPU(struct sys_device, mce_sysdev);
148 153
149/* 154/*
150 * Maximum banks number. 155 * Maximum banks number.
diff --git a/arch/x86/include/asm/mmzone_32.h b/arch/x86/include/asm/mmzone_32.h
index 224e8c5eb307..55728e121473 100644
--- a/arch/x86/include/asm/mmzone_32.h
+++ b/arch/x86/include/asm/mmzone_32.h
@@ -34,15 +34,15 @@ static inline void resume_map_numa_kva(pgd_t *pgd) {}
34 * 64Gb / 4096bytes/page = 16777216 pages 34 * 64Gb / 4096bytes/page = 16777216 pages
35 */ 35 */
36#define MAX_NR_PAGES 16777216 36#define MAX_NR_PAGES 16777216
37#define MAX_ELEMENTS 1024 37#define MAX_SECTIONS 1024
38#define PAGES_PER_ELEMENT (MAX_NR_PAGES/MAX_ELEMENTS) 38#define PAGES_PER_SECTION (MAX_NR_PAGES/MAX_SECTIONS)
39 39
40extern s8 physnode_map[]; 40extern s8 physnode_map[];
41 41
42static inline int pfn_to_nid(unsigned long pfn) 42static inline int pfn_to_nid(unsigned long pfn)
43{ 43{
44#ifdef CONFIG_NUMA 44#ifdef CONFIG_NUMA
45 return((int) physnode_map[(pfn) / PAGES_PER_ELEMENT]); 45 return((int) physnode_map[(pfn) / PAGES_PER_SECTION]);
46#else 46#else
47 return 0; 47 return 0;
48#endif 48#endif
@@ -57,6 +57,8 @@ static inline int pfn_valid(int pfn)
57 return 0; 57 return 0;
58} 58}
59 59
60#define early_pfn_valid(pfn) pfn_valid((pfn))
61
60#endif /* CONFIG_DISCONTIGMEM */ 62#endif /* CONFIG_DISCONTIGMEM */
61 63
62#ifdef CONFIG_NEED_MULTIPLE_NODES 64#ifdef CONFIG_NEED_MULTIPLE_NODES
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index e3022ccff33b..d52609aeeab8 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -259,6 +259,9 @@
259#define MSR_IA32_TEMPERATURE_TARGET 0x000001a2 259#define MSR_IA32_TEMPERATURE_TARGET 0x000001a2
260 260
261#define MSR_IA32_ENERGY_PERF_BIAS 0x000001b0 261#define MSR_IA32_ENERGY_PERF_BIAS 0x000001b0
262#define ENERGY_PERF_BIAS_PERFORMANCE 0
263#define ENERGY_PERF_BIAS_NORMAL 6
264#define ENERGY_PERF_BIAS_POWERSAVE 15
262 265
263#define MSR_IA32_PACKAGE_THERM_STATUS 0x000001b1 266#define MSR_IA32_PACKAGE_THERM_STATUS 0x000001b1
264 267
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index a0a9779084d1..3470c9d0ebba 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -388,12 +388,9 @@ do { \
388#define __this_cpu_xor_1(pcp, val) percpu_to_op("xor", (pcp), val) 388#define __this_cpu_xor_1(pcp, val) percpu_to_op("xor", (pcp), val)
389#define __this_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val) 389#define __this_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val)
390#define __this_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val) 390#define __this_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val)
391/* 391#define __this_cpu_xchg_1(pcp, val) percpu_xchg_op(pcp, val)
392 * Generic fallback operations for __this_cpu_xchg_[1-4] are okay and much 392#define __this_cpu_xchg_2(pcp, val) percpu_xchg_op(pcp, val)
393 * faster than an xchg with forced lock semantics. 393#define __this_cpu_xchg_4(pcp, val) percpu_xchg_op(pcp, val)
394 */
395#define __this_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval)
396#define __this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
397 394
398#define this_cpu_read_1(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) 395#define this_cpu_read_1(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
399#define this_cpu_read_2(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) 396#define this_cpu_read_2(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
@@ -485,6 +482,8 @@ do { \
485#define __this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val) 482#define __this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val)
486#define __this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val) 483#define __this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val)
487#define __this_cpu_add_return_8(pcp, val) percpu_add_return_op(pcp, val) 484#define __this_cpu_add_return_8(pcp, val) percpu_add_return_op(pcp, val)
485#define __this_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval)
486#define __this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
488 487
489#define this_cpu_read_8(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) 488#define this_cpu_read_8(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
490#define this_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val) 489#define this_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val)
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index d9d4dae305f6..094fb30817ab 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -152,6 +152,11 @@ extern unsigned long perf_misc_flags(struct pt_regs *regs);
152 (regs)->bp = caller_frame_pointer(); \ 152 (regs)->bp = caller_frame_pointer(); \
153 (regs)->cs = __KERNEL_CS; \ 153 (regs)->cs = __KERNEL_CS; \
154 regs->flags = 0; \ 154 regs->flags = 0; \
155 asm volatile( \
156 _ASM_MOV "%%"_ASM_SP ", %0\n" \
157 : "=m" ((regs)->sp) \
158 :: "memory" \
159 ); \
155} 160}
156 161
157#else 162#else
diff --git a/arch/x86/include/asm/perf_event_p4.h b/arch/x86/include/asm/perf_event_p4.h
index 56fd9e3abbda..4f7e67e2345e 100644
--- a/arch/x86/include/asm/perf_event_p4.h
+++ b/arch/x86/include/asm/perf_event_p4.h
@@ -102,6 +102,14 @@
102#define P4_CONFIG_HT (1ULL << P4_CONFIG_HT_SHIFT) 102#define P4_CONFIG_HT (1ULL << P4_CONFIG_HT_SHIFT)
103 103
104/* 104/*
105 * If an event has alias it should be marked
106 * with a special bit. (Don't forget to check
107 * P4_PEBS_CONFIG_MASK and related bits on
108 * modification.)
109 */
110#define P4_CONFIG_ALIASABLE (1 << 9)
111
112/*
105 * The bits we allow to pass for RAW events 113 * The bits we allow to pass for RAW events
106 */ 114 */
107#define P4_CONFIG_MASK_ESCR \ 115#define P4_CONFIG_MASK_ESCR \
@@ -123,6 +131,31 @@
123 (p4_config_pack_escr(P4_CONFIG_MASK_ESCR)) | \ 131 (p4_config_pack_escr(P4_CONFIG_MASK_ESCR)) | \
124 (p4_config_pack_cccr(P4_CONFIG_MASK_CCCR)) 132 (p4_config_pack_cccr(P4_CONFIG_MASK_CCCR))
125 133
134/*
135 * In case of event aliasing we need to preserve some
136 * caller bits, otherwise the mapping won't be complete.
137 */
138#define P4_CONFIG_EVENT_ALIAS_MASK \
139 (p4_config_pack_escr(P4_CONFIG_MASK_ESCR) | \
140 p4_config_pack_cccr(P4_CCCR_EDGE | \
141 P4_CCCR_THRESHOLD_MASK | \
142 P4_CCCR_COMPLEMENT | \
143 P4_CCCR_COMPARE))
144
145#define P4_CONFIG_EVENT_ALIAS_IMMUTABLE_BITS \
146 ((P4_CONFIG_HT) | \
147 p4_config_pack_escr(P4_ESCR_T0_OS | \
148 P4_ESCR_T0_USR | \
149 P4_ESCR_T1_OS | \
150 P4_ESCR_T1_USR) | \
151 p4_config_pack_cccr(P4_CCCR_OVF | \
152 P4_CCCR_CASCADE | \
153 P4_CCCR_FORCE_OVF | \
154 P4_CCCR_THREAD_ANY | \
155 P4_CCCR_OVF_PMI_T0 | \
156 P4_CCCR_OVF_PMI_T1 | \
157 P4_CONFIG_ALIASABLE))
158
126static inline bool p4_is_event_cascaded(u64 config) 159static inline bool p4_is_event_cascaded(u64 config)
127{ 160{
128 u32 cccr = p4_config_unpack_cccr(config); 161 u32 cccr = p4_config_unpack_cccr(config);
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index d56187c6b838..013286a10c2c 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -107,7 +107,8 @@
107#define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT) 107#define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
108#define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD) 108#define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
109#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER) 109#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
110#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT) 110#define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
111#define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
111#define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE) 112#define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
112#define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE) 113#define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
113#define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE) 114#define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
@@ -129,7 +130,8 @@
129#define PAGE_KERNEL_LARGE_NOCACHE __pgprot(__PAGE_KERNEL_LARGE_NOCACHE) 130#define PAGE_KERNEL_LARGE_NOCACHE __pgprot(__PAGE_KERNEL_LARGE_NOCACHE)
130#define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC) 131#define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC)
131#define PAGE_KERNEL_VSYSCALL __pgprot(__PAGE_KERNEL_VSYSCALL) 132#define PAGE_KERNEL_VSYSCALL __pgprot(__PAGE_KERNEL_VSYSCALL)
132#define PAGE_KERNEL_VSYSCALL_NOCACHE __pgprot(__PAGE_KERNEL_VSYSCALL_NOCACHE) 133#define PAGE_KERNEL_VVAR __pgprot(__PAGE_KERNEL_VVAR)
134#define PAGE_KERNEL_VVAR_NOCACHE __pgprot(__PAGE_KERNEL_VVAR_NOCACHE)
133 135
134#define PAGE_KERNEL_IO __pgprot(__PAGE_KERNEL_IO) 136#define PAGE_KERNEL_IO __pgprot(__PAGE_KERNEL_IO)
135#define PAGE_KERNEL_IO_NOCACHE __pgprot(__PAGE_KERNEL_IO_NOCACHE) 137#define PAGE_KERNEL_IO_NOCACHE __pgprot(__PAGE_KERNEL_IO_NOCACHE)
diff --git a/arch/x86/include/asm/prom.h b/arch/x86/include/asm/prom.h
index 971e0b46446e..df1287019e6d 100644
--- a/arch/x86/include/asm/prom.h
+++ b/arch/x86/include/asm/prom.h
@@ -30,17 +30,6 @@ extern void add_dtb(u64 data);
30extern void x86_add_irq_domains(void); 30extern void x86_add_irq_domains(void);
31void __cpuinit x86_of_pci_init(void); 31void __cpuinit x86_of_pci_init(void);
32void x86_dtb_init(void); 32void x86_dtb_init(void);
33
34static inline struct device_node *pci_device_to_OF_node(struct pci_dev *pdev)
35{
36 return pdev ? pdev->dev.of_node : NULL;
37}
38
39static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
40{
41 return pci_device_to_OF_node(bus->self);
42}
43
44#else 33#else
45static inline void add_dtb(u64 data) { } 34static inline void add_dtb(u64 data) { }
46static inline void x86_add_irq_domains(void) { } 35static inline void x86_add_irq_domains(void) { }
diff --git a/arch/x86/include/asm/rwlock.h b/arch/x86/include/asm/rwlock.h
index 6a8c0d645108..a5370a03d90c 100644
--- a/arch/x86/include/asm/rwlock.h
+++ b/arch/x86/include/asm/rwlock.h
@@ -1,7 +1,48 @@
1#ifndef _ASM_X86_RWLOCK_H 1#ifndef _ASM_X86_RWLOCK_H
2#define _ASM_X86_RWLOCK_H 2#define _ASM_X86_RWLOCK_H
3 3
4#define RW_LOCK_BIAS 0x01000000 4#include <asm/asm.h>
5
6#if CONFIG_NR_CPUS <= 2048
7
8#ifndef __ASSEMBLY__
9typedef union {
10 s32 lock;
11 s32 write;
12} arch_rwlock_t;
13#endif
14
15#define RW_LOCK_BIAS 0x00100000
16#define READ_LOCK_SIZE(insn) __ASM_FORM(insn##l)
17#define READ_LOCK_ATOMIC(n) atomic_##n
18#define WRITE_LOCK_ADD(n) __ASM_FORM_COMMA(addl n)
19#define WRITE_LOCK_SUB(n) __ASM_FORM_COMMA(subl n)
20#define WRITE_LOCK_CMP RW_LOCK_BIAS
21
22#else /* CONFIG_NR_CPUS > 2048 */
23
24#include <linux/const.h>
25
26#ifndef __ASSEMBLY__
27typedef union {
28 s64 lock;
29 struct {
30 u32 read;
31 s32 write;
32 };
33} arch_rwlock_t;
34#endif
35
36#define RW_LOCK_BIAS (_AC(1,L) << 32)
37#define READ_LOCK_SIZE(insn) __ASM_FORM(insn##q)
38#define READ_LOCK_ATOMIC(n) atomic64_##n
39#define WRITE_LOCK_ADD(n) __ASM_FORM(incl)
40#define WRITE_LOCK_SUB(n) __ASM_FORM(decl)
41#define WRITE_LOCK_CMP 1
42
43#endif /* CONFIG_NR_CPUS */
44
45#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
5 46
6/* Actual code is in asm/spinlock.h or in arch/x86/lib/rwlock.S */ 47/* Actual code is in asm/spinlock.h or in arch/x86/lib/rwlock.S */
7 48
diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
index cd84f7208f76..5e641715c3fe 100644
--- a/arch/x86/include/asm/segment.h
+++ b/arch/x86/include/asm/segment.h
@@ -162,7 +162,7 @@
162#define GDT_ENTRY_DEFAULT_USER32_CS 4 162#define GDT_ENTRY_DEFAULT_USER32_CS 4
163#define GDT_ENTRY_DEFAULT_USER_DS 5 163#define GDT_ENTRY_DEFAULT_USER_DS 5
164#define GDT_ENTRY_DEFAULT_USER_CS 6 164#define GDT_ENTRY_DEFAULT_USER_CS 6
165#define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3) 165#define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
166#define __USER32_DS __USER_DS 166#define __USER32_DS __USER_DS
167 167
168#define GDT_ENTRY_TSS 8 /* needs two entries */ 168#define GDT_ENTRY_TSS 8 /* needs two entries */
diff --git a/arch/x86/include/asm/smpboot_hooks.h b/arch/x86/include/asm/smpboot_hooks.h
index 725b77831993..49adfd7bb4a4 100644
--- a/arch/x86/include/asm/smpboot_hooks.h
+++ b/arch/x86/include/asm/smpboot_hooks.h
@@ -10,7 +10,11 @@ static inline void smpboot_clear_io_apic_irqs(void)
10 10
11static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip) 11static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
12{ 12{
13 unsigned long flags;
14
15 spin_lock_irqsave(&rtc_lock, flags);
13 CMOS_WRITE(0xa, 0xf); 16 CMOS_WRITE(0xa, 0xf);
17 spin_unlock_irqrestore(&rtc_lock, flags);
14 local_flush_tlb(); 18 local_flush_tlb();
15 pr_debug("1.\n"); 19 pr_debug("1.\n");
16 *((volatile unsigned short *)phys_to_virt(apic->trampoline_phys_high)) = 20 *((volatile unsigned short *)phys_to_virt(apic->trampoline_phys_high)) =
@@ -23,6 +27,8 @@ static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
23 27
24static inline void smpboot_restore_warm_reset_vector(void) 28static inline void smpboot_restore_warm_reset_vector(void)
25{ 29{
30 unsigned long flags;
31
26 /* 32 /*
27 * Install writable page 0 entry to set BIOS data area. 33 * Install writable page 0 entry to set BIOS data area.
28 */ 34 */
@@ -32,7 +38,9 @@ static inline void smpboot_restore_warm_reset_vector(void)
32 * Paranoid: Set warm reset code and vector here back 38 * Paranoid: Set warm reset code and vector here back
33 * to default values. 39 * to default values.
34 */ 40 */
41 spin_lock_irqsave(&rtc_lock, flags);
35 CMOS_WRITE(0, 0xf); 42 CMOS_WRITE(0, 0xf);
43 spin_unlock_irqrestore(&rtc_lock, flags);
36 44
37 *((volatile u32 *)phys_to_virt(apic->trampoline_phys_low)) = 0; 45 *((volatile u32 *)phys_to_virt(apic->trampoline_phys_low)) = 0;
38} 46}
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 3089f70c0c52..e9e51f710e6c 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -2,7 +2,6 @@
2#define _ASM_X86_SPINLOCK_H 2#define _ASM_X86_SPINLOCK_H
3 3
4#include <asm/atomic.h> 4#include <asm/atomic.h>
5#include <asm/rwlock.h>
6#include <asm/page.h> 5#include <asm/page.h>
7#include <asm/processor.h> 6#include <asm/processor.h>
8#include <linux/compiler.h> 7#include <linux/compiler.h>
@@ -234,7 +233,7 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
234 */ 233 */
235static inline int arch_read_can_lock(arch_rwlock_t *lock) 234static inline int arch_read_can_lock(arch_rwlock_t *lock)
236{ 235{
237 return (int)(lock)->lock > 0; 236 return lock->lock > 0;
238} 237}
239 238
240/** 239/**
@@ -243,12 +242,12 @@ static inline int arch_read_can_lock(arch_rwlock_t *lock)
243 */ 242 */
244static inline int arch_write_can_lock(arch_rwlock_t *lock) 243static inline int arch_write_can_lock(arch_rwlock_t *lock)
245{ 244{
246 return (lock)->lock == RW_LOCK_BIAS; 245 return lock->write == WRITE_LOCK_CMP;
247} 246}
248 247
249static inline void arch_read_lock(arch_rwlock_t *rw) 248static inline void arch_read_lock(arch_rwlock_t *rw)
250{ 249{
251 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t" 250 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
252 "jns 1f\n" 251 "jns 1f\n"
253 "call __read_lock_failed\n\t" 252 "call __read_lock_failed\n\t"
254 "1:\n" 253 "1:\n"
@@ -257,47 +256,55 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
257 256
258static inline void arch_write_lock(arch_rwlock_t *rw) 257static inline void arch_write_lock(arch_rwlock_t *rw)
259{ 258{
260 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t" 259 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
261 "jz 1f\n" 260 "jz 1f\n"
262 "call __write_lock_failed\n\t" 261 "call __write_lock_failed\n\t"
263 "1:\n" 262 "1:\n"
264 ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory"); 263 ::LOCK_PTR_REG (&rw->write), "i" (RW_LOCK_BIAS)
264 : "memory");
265} 265}
266 266
267static inline int arch_read_trylock(arch_rwlock_t *lock) 267static inline int arch_read_trylock(arch_rwlock_t *lock)
268{ 268{
269 atomic_t *count = (atomic_t *)lock; 269 READ_LOCK_ATOMIC(t) *count = (READ_LOCK_ATOMIC(t) *)lock;
270 270
271 if (atomic_dec_return(count) >= 0) 271 if (READ_LOCK_ATOMIC(dec_return)(count) >= 0)
272 return 1; 272 return 1;
273 atomic_inc(count); 273 READ_LOCK_ATOMIC(inc)(count);
274 return 0; 274 return 0;
275} 275}
276 276
277static inline int arch_write_trylock(arch_rwlock_t *lock) 277static inline int arch_write_trylock(arch_rwlock_t *lock)
278{ 278{
279 atomic_t *count = (atomic_t *)lock; 279 atomic_t *count = (atomic_t *)&lock->write;
280 280
281 if (atomic_sub_and_test(RW_LOCK_BIAS, count)) 281 if (atomic_sub_and_test(WRITE_LOCK_CMP, count))
282 return 1; 282 return 1;
283 atomic_add(RW_LOCK_BIAS, count); 283 atomic_add(WRITE_LOCK_CMP, count);
284 return 0; 284 return 0;
285} 285}
286 286
287static inline void arch_read_unlock(arch_rwlock_t *rw) 287static inline void arch_read_unlock(arch_rwlock_t *rw)
288{ 288{
289 asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory"); 289 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
290 :"+m" (rw->lock) : : "memory");
290} 291}
291 292
292static inline void arch_write_unlock(arch_rwlock_t *rw) 293static inline void arch_write_unlock(arch_rwlock_t *rw)
293{ 294{
294 asm volatile(LOCK_PREFIX "addl %1, %0" 295 asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
295 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory"); 296 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
296} 297}
297 298
298#define arch_read_lock_flags(lock, flags) arch_read_lock(lock) 299#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
299#define arch_write_lock_flags(lock, flags) arch_write_lock(lock) 300#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
300 301
302#undef READ_LOCK_SIZE
303#undef READ_LOCK_ATOMIC
304#undef WRITE_LOCK_ADD
305#undef WRITE_LOCK_SUB
306#undef WRITE_LOCK_CMP
307
301#define arch_spin_relax(lock) cpu_relax() 308#define arch_spin_relax(lock) cpu_relax()
302#define arch_read_relax(lock) cpu_relax() 309#define arch_read_relax(lock) cpu_relax()
303#define arch_write_relax(lock) cpu_relax() 310#define arch_write_relax(lock) cpu_relax()
diff --git a/arch/x86/include/asm/spinlock_types.h b/arch/x86/include/asm/spinlock_types.h
index dcb48b2edc11..7c7a486fcb68 100644
--- a/arch/x86/include/asm/spinlock_types.h
+++ b/arch/x86/include/asm/spinlock_types.h
@@ -11,10 +11,6 @@ typedef struct arch_spinlock {
11 11
12#define __ARCH_SPIN_LOCK_UNLOCKED { 0 } 12#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
13 13
14typedef struct { 14#include <asm/rwlock.h>
15 unsigned int lock;
16} arch_rwlock_t;
17
18#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
19 15
20#endif /* _ASM_X86_SPINLOCK_TYPES_H */ 16#endif /* _ASM_X86_SPINLOCK_TYPES_H */
diff --git a/arch/x86/include/asm/time.h b/arch/x86/include/asm/time.h
index 7bdec4e9b739..92b8aec06970 100644
--- a/arch/x86/include/asm/time.h
+++ b/arch/x86/include/asm/time.h
@@ -1,10 +1,12 @@
1#ifndef _ASM_X86_TIME_H 1#ifndef _ASM_X86_TIME_H
2#define _ASM_X86_TIME_H 2#define _ASM_X86_TIME_H
3 3
4extern void hpet_time_init(void); 4#include <linux/clocksource.h>
5
6#include <asm/mc146818rtc.h> 5#include <asm/mc146818rtc.h>
7 6
7extern void hpet_time_init(void);
8extern void time_init(void); 8extern void time_init(void);
9 9
10extern struct clock_event_device *global_clock_event;
11
10#endif /* _ASM_X86_TIME_H */ 12#endif /* _ASM_X86_TIME_H */
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index 0310da67307f..2bae0a513b40 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -1,6 +1,8 @@
1#ifndef _ASM_X86_TRAPS_H 1#ifndef _ASM_X86_TRAPS_H
2#define _ASM_X86_TRAPS_H 2#define _ASM_X86_TRAPS_H
3 3
4#include <linux/kprobes.h>
5
4#include <asm/debugreg.h> 6#include <asm/debugreg.h>
5#include <asm/siginfo.h> /* TRAP_TRACE, ... */ 7#include <asm/siginfo.h> /* TRAP_TRACE, ... */
6 8
@@ -38,6 +40,7 @@ asmlinkage void alignment_check(void);
38asmlinkage void machine_check(void); 40asmlinkage void machine_check(void);
39#endif /* CONFIG_X86_MCE */ 41#endif /* CONFIG_X86_MCE */
40asmlinkage void simd_coprocessor_error(void); 42asmlinkage void simd_coprocessor_error(void);
43asmlinkage void emulate_vsyscall(void);
41 44
42dotraplinkage void do_divide_error(struct pt_regs *, long); 45dotraplinkage void do_divide_error(struct pt_regs *, long);
43dotraplinkage void do_debug(struct pt_regs *, long); 46dotraplinkage void do_debug(struct pt_regs *, long);
@@ -64,6 +67,7 @@ dotraplinkage void do_alignment_check(struct pt_regs *, long);
64dotraplinkage void do_machine_check(struct pt_regs *, long); 67dotraplinkage void do_machine_check(struct pt_regs *, long);
65#endif 68#endif
66dotraplinkage void do_simd_coprocessor_error(struct pt_regs *, long); 69dotraplinkage void do_simd_coprocessor_error(struct pt_regs *, long);
70dotraplinkage void do_emulate_vsyscall(struct pt_regs *, long);
67#ifdef CONFIG_X86_32 71#ifdef CONFIG_X86_32
68dotraplinkage void do_iret_error(struct pt_regs *, long); 72dotraplinkage void do_iret_error(struct pt_regs *, long);
69#endif 73#endif
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index 9db5583b6d38..83e2efd181e2 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -51,10 +51,6 @@ extern int unsynchronized_tsc(void);
51extern int check_tsc_unstable(void); 51extern int check_tsc_unstable(void);
52extern unsigned long native_calibrate_tsc(void); 52extern unsigned long native_calibrate_tsc(void);
53 53
54#ifdef CONFIG_X86_64
55extern cycles_t vread_tsc(void);
56#endif
57
58/* 54/*
59 * Boot-time check whether the TSCs are synchronized across 55 * Boot-time check whether the TSCs are synchronized across
60 * all CPUs/cores: 56 * all CPUs/cores:
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 99ddd148a760..36361bf6fdd1 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -555,6 +555,9 @@ struct __large_struct { unsigned long buf[100]; };
555 555
556#endif /* CONFIG_X86_WP_WORKS_OK */ 556#endif /* CONFIG_X86_WP_WORKS_OK */
557 557
558extern unsigned long
559copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
560
558/* 561/*
559 * movsl can be slow when source and dest are not both 8-byte aligned 562 * movsl can be slow when source and dest are not both 8-byte aligned
560 */ 563 */
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index a291c40efd43..37d369859c8e 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -67,7 +67,7 @@
67 * we're using 655us, similar to UV1: 65 units of 10us 67 * we're using 655us, similar to UV1: 65 units of 10us
68 */ 68 */
69#define UV1_INTD_SOFT_ACK_TIMEOUT_PERIOD (9UL) 69#define UV1_INTD_SOFT_ACK_TIMEOUT_PERIOD (9UL)
70#define UV2_INTD_SOFT_ACK_TIMEOUT_PERIOD (65*10UL) 70#define UV2_INTD_SOFT_ACK_TIMEOUT_PERIOD (15UL)
71 71
72#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD (is_uv1_hub() ? \ 72#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD (is_uv1_hub() ? \
73 UV1_INTD_SOFT_ACK_TIMEOUT_PERIOD : \ 73 UV1_INTD_SOFT_ACK_TIMEOUT_PERIOD : \
@@ -106,12 +106,20 @@
106#define DS_SOURCE_TIMEOUT 3 106#define DS_SOURCE_TIMEOUT 3
107/* 107/*
108 * bits put together from HRP_LB_BAU_SB_ACTIVATION_STATUS_0/1/2 108 * bits put together from HRP_LB_BAU_SB_ACTIVATION_STATUS_0/1/2
109 * values 1 and 5 will not occur 109 * values 1 and 3 will not occur
110 * Decoded meaning ERROR BUSY AUX ERR
111 * ------------------------------- ---- ----- -------
112 * IDLE 0 0 0
113 * BUSY (active) 0 1 0
114 * SW Ack Timeout (destination) 1 0 0
115 * SW Ack INTD rejected (strong NACK) 1 0 1
116 * Source Side Time Out Detected 1 1 0
117 * Destination Side PUT Failed 1 1 1
110 */ 118 */
111#define UV2H_DESC_IDLE 0 119#define UV2H_DESC_IDLE 0
112#define UV2H_DESC_DEST_TIMEOUT 2 120#define UV2H_DESC_BUSY 2
113#define UV2H_DESC_DEST_STRONG_NACK 3 121#define UV2H_DESC_DEST_TIMEOUT 4
114#define UV2H_DESC_BUSY 4 122#define UV2H_DESC_DEST_STRONG_NACK 5
115#define UV2H_DESC_SOURCE_TIMEOUT 6 123#define UV2H_DESC_SOURCE_TIMEOUT 6
116#define UV2H_DESC_DEST_PUT_ERR 7 124#define UV2H_DESC_DEST_PUT_ERR 7
117 125
@@ -183,7 +191,7 @@
183 * 'base_dest_nasid' field of the header corresponds to the 191 * 'base_dest_nasid' field of the header corresponds to the
184 * destination nodeID associated with that specified bit. 192 * destination nodeID associated with that specified bit.
185 */ 193 */
186struct bau_targ_hubmask { 194struct pnmask {
187 unsigned long bits[BITS_TO_LONGS(UV_DISTRIBUTION_SIZE)]; 195 unsigned long bits[BITS_TO_LONGS(UV_DISTRIBUTION_SIZE)];
188}; 196};
189 197
@@ -314,7 +322,7 @@ struct bau_msg_header {
314 * Should be 64 bytes 322 * Should be 64 bytes
315 */ 323 */
316struct bau_desc { 324struct bau_desc {
317 struct bau_targ_hubmask distribution; 325 struct pnmask distribution;
318 /* 326 /*
319 * message template, consisting of header and payload: 327 * message template, consisting of header and payload:
320 */ 328 */
@@ -488,6 +496,7 @@ struct bau_control {
488 struct bau_control *uvhub_master; 496 struct bau_control *uvhub_master;
489 struct bau_control *socket_master; 497 struct bau_control *socket_master;
490 struct ptc_stats *statp; 498 struct ptc_stats *statp;
499 cpumask_t *cpumask;
491 unsigned long timeout_interval; 500 unsigned long timeout_interval;
492 unsigned long set_bau_on_time; 501 unsigned long set_bau_on_time;
493 atomic_t active_descriptor_count; 502 atomic_t active_descriptor_count;
@@ -526,90 +535,90 @@ struct bau_control {
526 struct hub_and_pnode *thp; 535 struct hub_and_pnode *thp;
527}; 536};
528 537
529static unsigned long read_mmr_uv2_status(void) 538static inline unsigned long read_mmr_uv2_status(void)
530{ 539{
531 return read_lmmr(UV2H_LB_BAU_SB_ACTIVATION_STATUS_2); 540 return read_lmmr(UV2H_LB_BAU_SB_ACTIVATION_STATUS_2);
532} 541}
533 542
534static void write_mmr_data_broadcast(int pnode, unsigned long mmr_image) 543static inline void write_mmr_data_broadcast(int pnode, unsigned long mmr_image)
535{ 544{
536 write_gmmr(pnode, UVH_BAU_DATA_BROADCAST, mmr_image); 545 write_gmmr(pnode, UVH_BAU_DATA_BROADCAST, mmr_image);
537} 546}
538 547
539static void write_mmr_descriptor_base(int pnode, unsigned long mmr_image) 548static inline void write_mmr_descriptor_base(int pnode, unsigned long mmr_image)
540{ 549{
541 write_gmmr(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE, mmr_image); 550 write_gmmr(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE, mmr_image);
542} 551}
543 552
544static void write_mmr_activation(unsigned long index) 553static inline void write_mmr_activation(unsigned long index)
545{ 554{
546 write_lmmr(UVH_LB_BAU_SB_ACTIVATION_CONTROL, index); 555 write_lmmr(UVH_LB_BAU_SB_ACTIVATION_CONTROL, index);
547} 556}
548 557
549static void write_gmmr_activation(int pnode, unsigned long mmr_image) 558static inline void write_gmmr_activation(int pnode, unsigned long mmr_image)
550{ 559{
551 write_gmmr(pnode, UVH_LB_BAU_SB_ACTIVATION_CONTROL, mmr_image); 560 write_gmmr(pnode, UVH_LB_BAU_SB_ACTIVATION_CONTROL, mmr_image);
552} 561}
553 562
554static void write_mmr_payload_first(int pnode, unsigned long mmr_image) 563static inline void write_mmr_payload_first(int pnode, unsigned long mmr_image)
555{ 564{
556 write_gmmr(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST, mmr_image); 565 write_gmmr(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST, mmr_image);
557} 566}
558 567
559static void write_mmr_payload_tail(int pnode, unsigned long mmr_image) 568static inline void write_mmr_payload_tail(int pnode, unsigned long mmr_image)
560{ 569{
561 write_gmmr(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL, mmr_image); 570 write_gmmr(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL, mmr_image);
562} 571}
563 572
564static void write_mmr_payload_last(int pnode, unsigned long mmr_image) 573static inline void write_mmr_payload_last(int pnode, unsigned long mmr_image)
565{ 574{
566 write_gmmr(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST, mmr_image); 575 write_gmmr(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST, mmr_image);
567} 576}
568 577
569static void write_mmr_misc_control(int pnode, unsigned long mmr_image) 578static inline void write_mmr_misc_control(int pnode, unsigned long mmr_image)
570{ 579{
571 write_gmmr(pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image); 580 write_gmmr(pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
572} 581}
573 582
574static unsigned long read_mmr_misc_control(int pnode) 583static inline unsigned long read_mmr_misc_control(int pnode)
575{ 584{
576 return read_gmmr(pnode, UVH_LB_BAU_MISC_CONTROL); 585 return read_gmmr(pnode, UVH_LB_BAU_MISC_CONTROL);
577} 586}
578 587
579static void write_mmr_sw_ack(unsigned long mr) 588static inline void write_mmr_sw_ack(unsigned long mr)
580{ 589{
581 uv_write_local_mmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, mr); 590 uv_write_local_mmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, mr);
582} 591}
583 592
584static unsigned long read_mmr_sw_ack(void) 593static inline unsigned long read_mmr_sw_ack(void)
585{ 594{
586 return read_lmmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE); 595 return read_lmmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
587} 596}
588 597
589static unsigned long read_gmmr_sw_ack(int pnode) 598static inline unsigned long read_gmmr_sw_ack(int pnode)
590{ 599{
591 return read_gmmr(pnode, UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE); 600 return read_gmmr(pnode, UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
592} 601}
593 602
594static void write_mmr_data_config(int pnode, unsigned long mr) 603static inline void write_mmr_data_config(int pnode, unsigned long mr)
595{ 604{
596 uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG, mr); 605 uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG, mr);
597} 606}
598 607
599static inline int bau_uvhub_isset(int uvhub, struct bau_targ_hubmask *dstp) 608static inline int bau_uvhub_isset(int uvhub, struct pnmask *dstp)
600{ 609{
601 return constant_test_bit(uvhub, &dstp->bits[0]); 610 return constant_test_bit(uvhub, &dstp->bits[0]);
602} 611}
603static inline void bau_uvhub_set(int pnode, struct bau_targ_hubmask *dstp) 612static inline void bau_uvhub_set(int pnode, struct pnmask *dstp)
604{ 613{
605 __set_bit(pnode, &dstp->bits[0]); 614 __set_bit(pnode, &dstp->bits[0]);
606} 615}
607static inline void bau_uvhubs_clear(struct bau_targ_hubmask *dstp, 616static inline void bau_uvhubs_clear(struct pnmask *dstp,
608 int nbits) 617 int nbits)
609{ 618{
610 bitmap_zero(&dstp->bits[0], nbits); 619 bitmap_zero(&dstp->bits[0], nbits);
611} 620}
612static inline int bau_uvhub_weight(struct bau_targ_hubmask *dstp) 621static inline int bau_uvhub_weight(struct pnmask *dstp)
613{ 622{
614 return bitmap_weight((unsigned long *)&dstp->bits[0], 623 return bitmap_weight((unsigned long *)&dstp->bits[0],
615 UV_DISTRIBUTION_SIZE); 624 UV_DISTRIBUTION_SIZE);
diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h
index 4be52c863448..10474fb1185d 100644
--- a/arch/x86/include/asm/uv/uv_mmrs.h
+++ b/arch/x86/include/asm/uv/uv_mmrs.h
@@ -61,1689 +61,2016 @@
61/* Compat: if this #define is present, UV headers support UV2 */ 61/* Compat: if this #define is present, UV headers support UV2 */
62#define UV2_HUB_IS_SUPPORTED 1 62#define UV2_HUB_IS_SUPPORTED 1
63 63
64/* KABI compat: if this #define is present, KABI hacks are present */
65#define UV2_HUB_KABI_HACKS 1
66
67/* ========================================================================= */ 64/* ========================================================================= */
68/* UVH_BAU_DATA_BROADCAST */ 65/* UVH_BAU_DATA_BROADCAST */
69/* ========================================================================= */ 66/* ========================================================================= */
70#define UVH_BAU_DATA_BROADCAST 0x61688UL 67#define UVH_BAU_DATA_BROADCAST 0x61688UL
71#define UVH_BAU_DATA_BROADCAST_32 0x440 68#define UVH_BAU_DATA_BROADCAST_32 0x440
72 69
73#define UVH_BAU_DATA_BROADCAST_ENABLE_SHFT 0 70#define UVH_BAU_DATA_BROADCAST_ENABLE_SHFT 0
74#define UVH_BAU_DATA_BROADCAST_ENABLE_MASK 0x0000000000000001UL 71#define UVH_BAU_DATA_BROADCAST_ENABLE_MASK 0x0000000000000001UL
75 72
76union uvh_bau_data_broadcast_u { 73union uvh_bau_data_broadcast_u {
77 unsigned long v; 74 unsigned long v;
78 struct uvh_bau_data_broadcast_s { 75 struct uvh_bau_data_broadcast_s {
79 unsigned long enable : 1; /* RW */ 76 unsigned long enable:1; /* RW */
80 unsigned long rsvd_1_63: 63; /* */ 77 unsigned long rsvd_1_63:63;
81 } s; 78 } s;
82}; 79};
83 80
84/* ========================================================================= */ 81/* ========================================================================= */
85/* UVH_BAU_DATA_CONFIG */ 82/* UVH_BAU_DATA_CONFIG */
86/* ========================================================================= */ 83/* ========================================================================= */
87#define UVH_BAU_DATA_CONFIG 0x61680UL 84#define UVH_BAU_DATA_CONFIG 0x61680UL
88#define UVH_BAU_DATA_CONFIG_32 0x438 85#define UVH_BAU_DATA_CONFIG_32 0x438
89 86
90#define UVH_BAU_DATA_CONFIG_VECTOR_SHFT 0 87#define UVH_BAU_DATA_CONFIG_VECTOR_SHFT 0
91#define UVH_BAU_DATA_CONFIG_VECTOR_MASK 0x00000000000000ffUL 88#define UVH_BAU_DATA_CONFIG_DM_SHFT 8
92#define UVH_BAU_DATA_CONFIG_DM_SHFT 8 89#define UVH_BAU_DATA_CONFIG_DESTMODE_SHFT 11
93#define UVH_BAU_DATA_CONFIG_DM_MASK 0x0000000000000700UL 90#define UVH_BAU_DATA_CONFIG_STATUS_SHFT 12
94#define UVH_BAU_DATA_CONFIG_DESTMODE_SHFT 11 91#define UVH_BAU_DATA_CONFIG_P_SHFT 13
95#define UVH_BAU_DATA_CONFIG_DESTMODE_MASK 0x0000000000000800UL 92#define UVH_BAU_DATA_CONFIG_T_SHFT 15
96#define UVH_BAU_DATA_CONFIG_STATUS_SHFT 12 93#define UVH_BAU_DATA_CONFIG_M_SHFT 16
97#define UVH_BAU_DATA_CONFIG_STATUS_MASK 0x0000000000001000UL 94#define UVH_BAU_DATA_CONFIG_APIC_ID_SHFT 32
98#define UVH_BAU_DATA_CONFIG_P_SHFT 13 95#define UVH_BAU_DATA_CONFIG_VECTOR_MASK 0x00000000000000ffUL
99#define UVH_BAU_DATA_CONFIG_P_MASK 0x0000000000002000UL 96#define UVH_BAU_DATA_CONFIG_DM_MASK 0x0000000000000700UL
100#define UVH_BAU_DATA_CONFIG_T_SHFT 15 97#define UVH_BAU_DATA_CONFIG_DESTMODE_MASK 0x0000000000000800UL
101#define UVH_BAU_DATA_CONFIG_T_MASK 0x0000000000008000UL 98#define UVH_BAU_DATA_CONFIG_STATUS_MASK 0x0000000000001000UL
102#define UVH_BAU_DATA_CONFIG_M_SHFT 16 99#define UVH_BAU_DATA_CONFIG_P_MASK 0x0000000000002000UL
103#define UVH_BAU_DATA_CONFIG_M_MASK 0x0000000000010000UL 100#define UVH_BAU_DATA_CONFIG_T_MASK 0x0000000000008000UL
104#define UVH_BAU_DATA_CONFIG_APIC_ID_SHFT 32 101#define UVH_BAU_DATA_CONFIG_M_MASK 0x0000000000010000UL
105#define UVH_BAU_DATA_CONFIG_APIC_ID_MASK 0xffffffff00000000UL 102#define UVH_BAU_DATA_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
106 103
107union uvh_bau_data_config_u { 104union uvh_bau_data_config_u {
108 unsigned long v; 105 unsigned long v;
109 struct uvh_bau_data_config_s { 106 struct uvh_bau_data_config_s {
110 unsigned long vector_ : 8; /* RW */ 107 unsigned long vector_:8; /* RW */
111 unsigned long dm : 3; /* RW */ 108 unsigned long dm:3; /* RW */
112 unsigned long destmode : 1; /* RW */ 109 unsigned long destmode:1; /* RW */
113 unsigned long status : 1; /* RO */ 110 unsigned long status:1; /* RO */
114 unsigned long p : 1; /* RO */ 111 unsigned long p:1; /* RO */
115 unsigned long rsvd_14 : 1; /* */ 112 unsigned long rsvd_14:1;
116 unsigned long t : 1; /* RO */ 113 unsigned long t:1; /* RO */
117 unsigned long m : 1; /* RW */ 114 unsigned long m:1; /* RW */
118 unsigned long rsvd_17_31: 15; /* */ 115 unsigned long rsvd_17_31:15;
119 unsigned long apic_id : 32; /* RW */ 116 unsigned long apic_id:32; /* RW */
120 } s; 117 } s;
121}; 118};
122 119
123/* ========================================================================= */ 120/* ========================================================================= */
124/* UVH_EVENT_OCCURRED0 */ 121/* UVH_EVENT_OCCURRED0 */
125/* ========================================================================= */ 122/* ========================================================================= */
126#define UVH_EVENT_OCCURRED0 0x70000UL 123#define UVH_EVENT_OCCURRED0 0x70000UL
127#define UVH_EVENT_OCCURRED0_32 0x5e8 124#define UVH_EVENT_OCCURRED0_32 0x5e8
128 125
129#define UV1H_EVENT_OCCURRED0_LB_HCERR_SHFT 0 126#define UV1H_EVENT_OCCURRED0_LB_HCERR_SHFT 0
130#define UV1H_EVENT_OCCURRED0_LB_HCERR_MASK 0x0000000000000001UL 127#define UV1H_EVENT_OCCURRED0_GR0_HCERR_SHFT 1
131#define UV1H_EVENT_OCCURRED0_GR0_HCERR_SHFT 1 128#define UV1H_EVENT_OCCURRED0_GR1_HCERR_SHFT 2
132#define UV1H_EVENT_OCCURRED0_GR0_HCERR_MASK 0x0000000000000002UL 129#define UV1H_EVENT_OCCURRED0_LH_HCERR_SHFT 3
133#define UV1H_EVENT_OCCURRED0_GR1_HCERR_SHFT 2 130#define UV1H_EVENT_OCCURRED0_RH_HCERR_SHFT 4
134#define UV1H_EVENT_OCCURRED0_GR1_HCERR_MASK 0x0000000000000004UL 131#define UV1H_EVENT_OCCURRED0_XN_HCERR_SHFT 5
135#define UV1H_EVENT_OCCURRED0_LH_HCERR_SHFT 3 132#define UV1H_EVENT_OCCURRED0_SI_HCERR_SHFT 6
136#define UV1H_EVENT_OCCURRED0_LH_HCERR_MASK 0x0000000000000008UL 133#define UV1H_EVENT_OCCURRED0_LB_AOERR0_SHFT 7
137#define UV1H_EVENT_OCCURRED0_RH_HCERR_SHFT 4 134#define UV1H_EVENT_OCCURRED0_GR0_AOERR0_SHFT 8
138#define UV1H_EVENT_OCCURRED0_RH_HCERR_MASK 0x0000000000000010UL 135#define UV1H_EVENT_OCCURRED0_GR1_AOERR0_SHFT 9
139#define UV1H_EVENT_OCCURRED0_XN_HCERR_SHFT 5 136#define UV1H_EVENT_OCCURRED0_LH_AOERR0_SHFT 10
140#define UV1H_EVENT_OCCURRED0_XN_HCERR_MASK 0x0000000000000020UL 137#define UV1H_EVENT_OCCURRED0_RH_AOERR0_SHFT 11
141#define UV1H_EVENT_OCCURRED0_SI_HCERR_SHFT 6 138#define UV1H_EVENT_OCCURRED0_XN_AOERR0_SHFT 12
142#define UV1H_EVENT_OCCURRED0_SI_HCERR_MASK 0x0000000000000040UL 139#define UV1H_EVENT_OCCURRED0_SI_AOERR0_SHFT 13
143#define UV1H_EVENT_OCCURRED0_LB_AOERR0_SHFT 7 140#define UV1H_EVENT_OCCURRED0_LB_AOERR1_SHFT 14
144#define UV1H_EVENT_OCCURRED0_LB_AOERR0_MASK 0x0000000000000080UL 141#define UV1H_EVENT_OCCURRED0_GR0_AOERR1_SHFT 15
145#define UV1H_EVENT_OCCURRED0_GR0_AOERR0_SHFT 8 142#define UV1H_EVENT_OCCURRED0_GR1_AOERR1_SHFT 16
146#define UV1H_EVENT_OCCURRED0_GR0_AOERR0_MASK 0x0000000000000100UL 143#define UV1H_EVENT_OCCURRED0_LH_AOERR1_SHFT 17
147#define UV1H_EVENT_OCCURRED0_GR1_AOERR0_SHFT 9 144#define UV1H_EVENT_OCCURRED0_RH_AOERR1_SHFT 18
148#define UV1H_EVENT_OCCURRED0_GR1_AOERR0_MASK 0x0000000000000200UL 145#define UV1H_EVENT_OCCURRED0_XN_AOERR1_SHFT 19
149#define UV1H_EVENT_OCCURRED0_LH_AOERR0_SHFT 10 146#define UV1H_EVENT_OCCURRED0_SI_AOERR1_SHFT 20
150#define UV1H_EVENT_OCCURRED0_LH_AOERR0_MASK 0x0000000000000400UL 147#define UV1H_EVENT_OCCURRED0_RH_VPI_INT_SHFT 21
151#define UV1H_EVENT_OCCURRED0_RH_AOERR0_SHFT 11 148#define UV1H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 22
152#define UV1H_EVENT_OCCURRED0_RH_AOERR0_MASK 0x0000000000000800UL 149#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 23
153#define UV1H_EVENT_OCCURRED0_XN_AOERR0_SHFT 12 150#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 24
154#define UV1H_EVENT_OCCURRED0_XN_AOERR0_MASK 0x0000000000001000UL 151#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 25
155#define UV1H_EVENT_OCCURRED0_SI_AOERR0_SHFT 13 152#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 26
156#define UV1H_EVENT_OCCURRED0_SI_AOERR0_MASK 0x0000000000002000UL 153#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 27
157#define UV1H_EVENT_OCCURRED0_LB_AOERR1_SHFT 14 154#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 28
158#define UV1H_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000004000UL 155#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 29
159#define UV1H_EVENT_OCCURRED0_GR0_AOERR1_SHFT 15 156#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 30
160#define UV1H_EVENT_OCCURRED0_GR0_AOERR1_MASK 0x0000000000008000UL 157#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 31
161#define UV1H_EVENT_OCCURRED0_GR1_AOERR1_SHFT 16 158#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 32
162#define UV1H_EVENT_OCCURRED0_GR1_AOERR1_MASK 0x0000000000010000UL 159#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 33
163#define UV1H_EVENT_OCCURRED0_LH_AOERR1_SHFT 17 160#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 34
164#define UV1H_EVENT_OCCURRED0_LH_AOERR1_MASK 0x0000000000020000UL 161#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 35
165#define UV1H_EVENT_OCCURRED0_RH_AOERR1_SHFT 18 162#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 36
166#define UV1H_EVENT_OCCURRED0_RH_AOERR1_MASK 0x0000000000040000UL 163#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 37
167#define UV1H_EVENT_OCCURRED0_XN_AOERR1_SHFT 19 164#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 38
168#define UV1H_EVENT_OCCURRED0_XN_AOERR1_MASK 0x0000000000080000UL 165#define UV1H_EVENT_OCCURRED0_L1_NMI_INT_SHFT 39
169#define UV1H_EVENT_OCCURRED0_SI_AOERR1_SHFT 20 166#define UV1H_EVENT_OCCURRED0_STOP_CLOCK_SHFT 40
170#define UV1H_EVENT_OCCURRED0_SI_AOERR1_MASK 0x0000000000100000UL 167#define UV1H_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 41
171#define UV1H_EVENT_OCCURRED0_RH_VPI_INT_SHFT 21 168#define UV1H_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 42
172#define UV1H_EVENT_OCCURRED0_RH_VPI_INT_MASK 0x0000000000200000UL 169#define UV1H_EVENT_OCCURRED0_LTC_INT_SHFT 43
173#define UV1H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 22 170#define UV1H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 44
174#define UV1H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK 0x0000000000400000UL 171#define UV1H_EVENT_OCCURRED0_IPI_INT_SHFT 45
175#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 23 172#define UV1H_EVENT_OCCURRED0_EXTIO_INT0_SHFT 46
176#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK 0x0000000000800000UL 173#define UV1H_EVENT_OCCURRED0_EXTIO_INT1_SHFT 47
177#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 24 174#define UV1H_EVENT_OCCURRED0_EXTIO_INT2_SHFT 48
178#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK 0x0000000001000000UL 175#define UV1H_EVENT_OCCURRED0_EXTIO_INT3_SHFT 49
179#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 25 176#define UV1H_EVENT_OCCURRED0_PROFILE_INT_SHFT 50
180#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK 0x0000000002000000UL 177#define UV1H_EVENT_OCCURRED0_RTC0_SHFT 51
181#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 26 178#define UV1H_EVENT_OCCURRED0_RTC1_SHFT 52
182#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK 0x0000000004000000UL 179#define UV1H_EVENT_OCCURRED0_RTC2_SHFT 53
183#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 27 180#define UV1H_EVENT_OCCURRED0_RTC3_SHFT 54
184#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK 0x0000000008000000UL 181#define UV1H_EVENT_OCCURRED0_BAU_DATA_SHFT 55
185#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 28 182#define UV1H_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_SHFT 56
186#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK 0x0000000010000000UL 183#define UV1H_EVENT_OCCURRED0_LB_HCERR_MASK 0x0000000000000001UL
187#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 29 184#define UV1H_EVENT_OCCURRED0_GR0_HCERR_MASK 0x0000000000000002UL
188#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK 0x0000000020000000UL 185#define UV1H_EVENT_OCCURRED0_GR1_HCERR_MASK 0x0000000000000004UL
189#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 30 186#define UV1H_EVENT_OCCURRED0_LH_HCERR_MASK 0x0000000000000008UL
190#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK 0x0000000040000000UL 187#define UV1H_EVENT_OCCURRED0_RH_HCERR_MASK 0x0000000000000010UL
191#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 31 188#define UV1H_EVENT_OCCURRED0_XN_HCERR_MASK 0x0000000000000020UL
192#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK 0x0000000080000000UL 189#define UV1H_EVENT_OCCURRED0_SI_HCERR_MASK 0x0000000000000040UL
193#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 32 190#define UV1H_EVENT_OCCURRED0_LB_AOERR0_MASK 0x0000000000000080UL
194#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK 0x0000000100000000UL 191#define UV1H_EVENT_OCCURRED0_GR0_AOERR0_MASK 0x0000000000000100UL
195#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 33 192#define UV1H_EVENT_OCCURRED0_GR1_AOERR0_MASK 0x0000000000000200UL
196#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK 0x0000000200000000UL 193#define UV1H_EVENT_OCCURRED0_LH_AOERR0_MASK 0x0000000000000400UL
197#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 34 194#define UV1H_EVENT_OCCURRED0_RH_AOERR0_MASK 0x0000000000000800UL
198#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK 0x0000000400000000UL 195#define UV1H_EVENT_OCCURRED0_XN_AOERR0_MASK 0x0000000000001000UL
199#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 35 196#define UV1H_EVENT_OCCURRED0_SI_AOERR0_MASK 0x0000000000002000UL
200#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK 0x0000000800000000UL 197#define UV1H_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000004000UL
201#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 36 198#define UV1H_EVENT_OCCURRED0_GR0_AOERR1_MASK 0x0000000000008000UL
202#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK 0x0000001000000000UL 199#define UV1H_EVENT_OCCURRED0_GR1_AOERR1_MASK 0x0000000000010000UL
203#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 37 200#define UV1H_EVENT_OCCURRED0_LH_AOERR1_MASK 0x0000000000020000UL
204#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK 0x0000002000000000UL 201#define UV1H_EVENT_OCCURRED0_RH_AOERR1_MASK 0x0000000000040000UL
205#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 38 202#define UV1H_EVENT_OCCURRED0_XN_AOERR1_MASK 0x0000000000080000UL
206#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK 0x0000004000000000UL 203#define UV1H_EVENT_OCCURRED0_SI_AOERR1_MASK 0x0000000000100000UL
207#define UV1H_EVENT_OCCURRED0_L1_NMI_INT_SHFT 39 204#define UV1H_EVENT_OCCURRED0_RH_VPI_INT_MASK 0x0000000000200000UL
208#define UV1H_EVENT_OCCURRED0_L1_NMI_INT_MASK 0x0000008000000000UL 205#define UV1H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK 0x0000000000400000UL
209#define UV1H_EVENT_OCCURRED0_STOP_CLOCK_SHFT 40 206#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK 0x0000000000800000UL
210#define UV1H_EVENT_OCCURRED0_STOP_CLOCK_MASK 0x0000010000000000UL 207#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK 0x0000000001000000UL
211#define UV1H_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 41 208#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK 0x0000000002000000UL
212#define UV1H_EVENT_OCCURRED0_ASIC_TO_L1_MASK 0x0000020000000000UL 209#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK 0x0000000004000000UL
213#define UV1H_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 42 210#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK 0x0000000008000000UL
214#define UV1H_EVENT_OCCURRED0_L1_TO_ASIC_MASK 0x0000040000000000UL 211#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK 0x0000000010000000UL
215#define UV1H_EVENT_OCCURRED0_LTC_INT_SHFT 43 212#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK 0x0000000020000000UL
216#define UV1H_EVENT_OCCURRED0_LTC_INT_MASK 0x0000080000000000UL 213#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK 0x0000000040000000UL
217#define UV1H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 44 214#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK 0x0000000080000000UL
218#define UV1H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK 0x0000100000000000UL 215#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK 0x0000000100000000UL
219#define UV1H_EVENT_OCCURRED0_IPI_INT_SHFT 45 216#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK 0x0000000200000000UL
220#define UV1H_EVENT_OCCURRED0_IPI_INT_MASK 0x0000200000000000UL 217#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK 0x0000000400000000UL
221#define UV1H_EVENT_OCCURRED0_EXTIO_INT0_SHFT 46 218#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK 0x0000000800000000UL
222#define UV1H_EVENT_OCCURRED0_EXTIO_INT0_MASK 0x0000400000000000UL 219#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK 0x0000001000000000UL
223#define UV1H_EVENT_OCCURRED0_EXTIO_INT1_SHFT 47 220#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK 0x0000002000000000UL
224#define UV1H_EVENT_OCCURRED0_EXTIO_INT1_MASK 0x0000800000000000UL 221#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK 0x0000004000000000UL
225#define UV1H_EVENT_OCCURRED0_EXTIO_INT2_SHFT 48 222#define UV1H_EVENT_OCCURRED0_L1_NMI_INT_MASK 0x0000008000000000UL
226#define UV1H_EVENT_OCCURRED0_EXTIO_INT2_MASK 0x0001000000000000UL 223#define UV1H_EVENT_OCCURRED0_STOP_CLOCK_MASK 0x0000010000000000UL
227#define UV1H_EVENT_OCCURRED0_EXTIO_INT3_SHFT 49 224#define UV1H_EVENT_OCCURRED0_ASIC_TO_L1_MASK 0x0000020000000000UL
228#define UV1H_EVENT_OCCURRED0_EXTIO_INT3_MASK 0x0002000000000000UL 225#define UV1H_EVENT_OCCURRED0_L1_TO_ASIC_MASK 0x0000040000000000UL
229#define UV1H_EVENT_OCCURRED0_PROFILE_INT_SHFT 50 226#define UV1H_EVENT_OCCURRED0_LTC_INT_MASK 0x0000080000000000UL
230#define UV1H_EVENT_OCCURRED0_PROFILE_INT_MASK 0x0004000000000000UL 227#define UV1H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK 0x0000100000000000UL
231#define UV1H_EVENT_OCCURRED0_RTC0_SHFT 51 228#define UV1H_EVENT_OCCURRED0_IPI_INT_MASK 0x0000200000000000UL
232#define UV1H_EVENT_OCCURRED0_RTC0_MASK 0x0008000000000000UL 229#define UV1H_EVENT_OCCURRED0_EXTIO_INT0_MASK 0x0000400000000000UL
233#define UV1H_EVENT_OCCURRED0_RTC1_SHFT 52 230#define UV1H_EVENT_OCCURRED0_EXTIO_INT1_MASK 0x0000800000000000UL
234#define UV1H_EVENT_OCCURRED0_RTC1_MASK 0x0010000000000000UL 231#define UV1H_EVENT_OCCURRED0_EXTIO_INT2_MASK 0x0001000000000000UL
235#define UV1H_EVENT_OCCURRED0_RTC2_SHFT 53 232#define UV1H_EVENT_OCCURRED0_EXTIO_INT3_MASK 0x0002000000000000UL
236#define UV1H_EVENT_OCCURRED0_RTC2_MASK 0x0020000000000000UL 233#define UV1H_EVENT_OCCURRED0_PROFILE_INT_MASK 0x0004000000000000UL
237#define UV1H_EVENT_OCCURRED0_RTC3_SHFT 54 234#define UV1H_EVENT_OCCURRED0_RTC0_MASK 0x0008000000000000UL
238#define UV1H_EVENT_OCCURRED0_RTC3_MASK 0x0040000000000000UL 235#define UV1H_EVENT_OCCURRED0_RTC1_MASK 0x0010000000000000UL
239#define UV1H_EVENT_OCCURRED0_BAU_DATA_SHFT 55 236#define UV1H_EVENT_OCCURRED0_RTC2_MASK 0x0020000000000000UL
240#define UV1H_EVENT_OCCURRED0_BAU_DATA_MASK 0x0080000000000000UL 237#define UV1H_EVENT_OCCURRED0_RTC3_MASK 0x0040000000000000UL
241#define UV1H_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_SHFT 56 238#define UV1H_EVENT_OCCURRED0_BAU_DATA_MASK 0x0080000000000000UL
242#define UV1H_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_MASK 0x0100000000000000UL 239#define UV1H_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_MASK 0x0100000000000000UL
243 240
244#define UV2H_EVENT_OCCURRED0_LB_HCERR_SHFT 0 241#define UV2H_EVENT_OCCURRED0_LB_HCERR_SHFT 0
245#define UV2H_EVENT_OCCURRED0_LB_HCERR_MASK 0x0000000000000001UL 242#define UV2H_EVENT_OCCURRED0_QP_HCERR_SHFT 1
246#define UV2H_EVENT_OCCURRED0_QP_HCERR_SHFT 1 243#define UV2H_EVENT_OCCURRED0_RH_HCERR_SHFT 2
247#define UV2H_EVENT_OCCURRED0_QP_HCERR_MASK 0x0000000000000002UL 244#define UV2H_EVENT_OCCURRED0_LH0_HCERR_SHFT 3
248#define UV2H_EVENT_OCCURRED0_RH_HCERR_SHFT 2 245#define UV2H_EVENT_OCCURRED0_LH1_HCERR_SHFT 4
249#define UV2H_EVENT_OCCURRED0_RH_HCERR_MASK 0x0000000000000004UL 246#define UV2H_EVENT_OCCURRED0_GR0_HCERR_SHFT 5
250#define UV2H_EVENT_OCCURRED0_LH0_HCERR_SHFT 3 247#define UV2H_EVENT_OCCURRED0_GR1_HCERR_SHFT 6
251#define UV2H_EVENT_OCCURRED0_LH0_HCERR_MASK 0x0000000000000008UL 248#define UV2H_EVENT_OCCURRED0_NI0_HCERR_SHFT 7
252#define UV2H_EVENT_OCCURRED0_LH1_HCERR_SHFT 4 249#define UV2H_EVENT_OCCURRED0_NI1_HCERR_SHFT 8
253#define UV2H_EVENT_OCCURRED0_LH1_HCERR_MASK 0x0000000000000010UL 250#define UV2H_EVENT_OCCURRED0_LB_AOERR0_SHFT 9
254#define UV2H_EVENT_OCCURRED0_GR0_HCERR_SHFT 5 251#define UV2H_EVENT_OCCURRED0_QP_AOERR0_SHFT 10
255#define UV2H_EVENT_OCCURRED0_GR0_HCERR_MASK 0x0000000000000020UL 252#define UV2H_EVENT_OCCURRED0_RH_AOERR0_SHFT 11
256#define UV2H_EVENT_OCCURRED0_GR1_HCERR_SHFT 6 253#define UV2H_EVENT_OCCURRED0_LH0_AOERR0_SHFT 12
257#define UV2H_EVENT_OCCURRED0_GR1_HCERR_MASK 0x0000000000000040UL 254#define UV2H_EVENT_OCCURRED0_LH1_AOERR0_SHFT 13
258#define UV2H_EVENT_OCCURRED0_NI0_HCERR_SHFT 7 255#define UV2H_EVENT_OCCURRED0_GR0_AOERR0_SHFT 14
259#define UV2H_EVENT_OCCURRED0_NI0_HCERR_MASK 0x0000000000000080UL 256#define UV2H_EVENT_OCCURRED0_GR1_AOERR0_SHFT 15
260#define UV2H_EVENT_OCCURRED0_NI1_HCERR_SHFT 8 257#define UV2H_EVENT_OCCURRED0_XB_AOERR0_SHFT 16
261#define UV2H_EVENT_OCCURRED0_NI1_HCERR_MASK 0x0000000000000100UL 258#define UV2H_EVENT_OCCURRED0_RT_AOERR0_SHFT 17
262#define UV2H_EVENT_OCCURRED0_LB_AOERR0_SHFT 9 259#define UV2H_EVENT_OCCURRED0_NI0_AOERR0_SHFT 18
263#define UV2H_EVENT_OCCURRED0_LB_AOERR0_MASK 0x0000000000000200UL 260#define UV2H_EVENT_OCCURRED0_NI1_AOERR0_SHFT 19
264#define UV2H_EVENT_OCCURRED0_QP_AOERR0_SHFT 10 261#define UV2H_EVENT_OCCURRED0_LB_AOERR1_SHFT 20
265#define UV2H_EVENT_OCCURRED0_QP_AOERR0_MASK 0x0000000000000400UL 262#define UV2H_EVENT_OCCURRED0_QP_AOERR1_SHFT 21
266#define UV2H_EVENT_OCCURRED0_RH_AOERR0_SHFT 11 263#define UV2H_EVENT_OCCURRED0_RH_AOERR1_SHFT 22
267#define UV2H_EVENT_OCCURRED0_RH_AOERR0_MASK 0x0000000000000800UL 264#define UV2H_EVENT_OCCURRED0_LH0_AOERR1_SHFT 23
268#define UV2H_EVENT_OCCURRED0_LH0_AOERR0_SHFT 12 265#define UV2H_EVENT_OCCURRED0_LH1_AOERR1_SHFT 24
269#define UV2H_EVENT_OCCURRED0_LH0_AOERR0_MASK 0x0000000000001000UL 266#define UV2H_EVENT_OCCURRED0_GR0_AOERR1_SHFT 25
270#define UV2H_EVENT_OCCURRED0_LH1_AOERR0_SHFT 13 267#define UV2H_EVENT_OCCURRED0_GR1_AOERR1_SHFT 26
271#define UV2H_EVENT_OCCURRED0_LH1_AOERR0_MASK 0x0000000000002000UL 268#define UV2H_EVENT_OCCURRED0_XB_AOERR1_SHFT 27
272#define UV2H_EVENT_OCCURRED0_GR0_AOERR0_SHFT 14 269#define UV2H_EVENT_OCCURRED0_RT_AOERR1_SHFT 28
273#define UV2H_EVENT_OCCURRED0_GR0_AOERR0_MASK 0x0000000000004000UL 270#define UV2H_EVENT_OCCURRED0_NI0_AOERR1_SHFT 29
274#define UV2H_EVENT_OCCURRED0_GR1_AOERR0_SHFT 15 271#define UV2H_EVENT_OCCURRED0_NI1_AOERR1_SHFT 30
275#define UV2H_EVENT_OCCURRED0_GR1_AOERR0_MASK 0x0000000000008000UL 272#define UV2H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 31
276#define UV2H_EVENT_OCCURRED0_XB_AOERR0_SHFT 16 273#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 32
277#define UV2H_EVENT_OCCURRED0_XB_AOERR0_MASK 0x0000000000010000UL 274#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 33
278#define UV2H_EVENT_OCCURRED0_RT_AOERR0_SHFT 17 275#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 34
279#define UV2H_EVENT_OCCURRED0_RT_AOERR0_MASK 0x0000000000020000UL 276#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 35
280#define UV2H_EVENT_OCCURRED0_NI0_AOERR0_SHFT 18 277#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 36
281#define UV2H_EVENT_OCCURRED0_NI0_AOERR0_MASK 0x0000000000040000UL 278#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 37
282#define UV2H_EVENT_OCCURRED0_NI1_AOERR0_SHFT 19 279#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 38
283#define UV2H_EVENT_OCCURRED0_NI1_AOERR0_MASK 0x0000000000080000UL 280#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 39
284#define UV2H_EVENT_OCCURRED0_LB_AOERR1_SHFT 20 281#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 40
285#define UV2H_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000100000UL 282#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 41
286#define UV2H_EVENT_OCCURRED0_QP_AOERR1_SHFT 21 283#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 42
287#define UV2H_EVENT_OCCURRED0_QP_AOERR1_MASK 0x0000000000200000UL 284#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 43
288#define UV2H_EVENT_OCCURRED0_RH_AOERR1_SHFT 22 285#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 44
289#define UV2H_EVENT_OCCURRED0_RH_AOERR1_MASK 0x0000000000400000UL 286#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 45
290#define UV2H_EVENT_OCCURRED0_LH0_AOERR1_SHFT 23 287#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 46
291#define UV2H_EVENT_OCCURRED0_LH0_AOERR1_MASK 0x0000000000800000UL 288#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 47
292#define UV2H_EVENT_OCCURRED0_LH1_AOERR1_SHFT 24 289#define UV2H_EVENT_OCCURRED0_L1_NMI_INT_SHFT 48
293#define UV2H_EVENT_OCCURRED0_LH1_AOERR1_MASK 0x0000000001000000UL 290#define UV2H_EVENT_OCCURRED0_STOP_CLOCK_SHFT 49
294#define UV2H_EVENT_OCCURRED0_GR0_AOERR1_SHFT 25 291#define UV2H_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 50
295#define UV2H_EVENT_OCCURRED0_GR0_AOERR1_MASK 0x0000000002000000UL 292#define UV2H_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 51
296#define UV2H_EVENT_OCCURRED0_GR1_AOERR1_SHFT 26 293#define UV2H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 52
297#define UV2H_EVENT_OCCURRED0_GR1_AOERR1_MASK 0x0000000004000000UL 294#define UV2H_EVENT_OCCURRED0_IPI_INT_SHFT 53
298#define UV2H_EVENT_OCCURRED0_XB_AOERR1_SHFT 27 295#define UV2H_EVENT_OCCURRED0_EXTIO_INT0_SHFT 54
299#define UV2H_EVENT_OCCURRED0_XB_AOERR1_MASK 0x0000000008000000UL 296#define UV2H_EVENT_OCCURRED0_EXTIO_INT1_SHFT 55
300#define UV2H_EVENT_OCCURRED0_RT_AOERR1_SHFT 28 297#define UV2H_EVENT_OCCURRED0_EXTIO_INT2_SHFT 56
301#define UV2H_EVENT_OCCURRED0_RT_AOERR1_MASK 0x0000000010000000UL 298#define UV2H_EVENT_OCCURRED0_EXTIO_INT3_SHFT 57
302#define UV2H_EVENT_OCCURRED0_NI0_AOERR1_SHFT 29 299#define UV2H_EVENT_OCCURRED0_PROFILE_INT_SHFT 58
303#define UV2H_EVENT_OCCURRED0_NI0_AOERR1_MASK 0x0000000020000000UL 300#define UV2H_EVENT_OCCURRED0_LB_HCERR_MASK 0x0000000000000001UL
304#define UV2H_EVENT_OCCURRED0_NI1_AOERR1_SHFT 30 301#define UV2H_EVENT_OCCURRED0_QP_HCERR_MASK 0x0000000000000002UL
305#define UV2H_EVENT_OCCURRED0_NI1_AOERR1_MASK 0x0000000040000000UL 302#define UV2H_EVENT_OCCURRED0_RH_HCERR_MASK 0x0000000000000004UL
306#define UV2H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 31 303#define UV2H_EVENT_OCCURRED0_LH0_HCERR_MASK 0x0000000000000008UL
307#define UV2H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK 0x0000000080000000UL 304#define UV2H_EVENT_OCCURRED0_LH1_HCERR_MASK 0x0000000000000010UL
308#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 32 305#define UV2H_EVENT_OCCURRED0_GR0_HCERR_MASK 0x0000000000000020UL
309#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK 0x0000000100000000UL 306#define UV2H_EVENT_OCCURRED0_GR1_HCERR_MASK 0x0000000000000040UL
310#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 33 307#define UV2H_EVENT_OCCURRED0_NI0_HCERR_MASK 0x0000000000000080UL
311#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK 0x0000000200000000UL 308#define UV2H_EVENT_OCCURRED0_NI1_HCERR_MASK 0x0000000000000100UL
312#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 34 309#define UV2H_EVENT_OCCURRED0_LB_AOERR0_MASK 0x0000000000000200UL
313#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK 0x0000000400000000UL 310#define UV2H_EVENT_OCCURRED0_QP_AOERR0_MASK 0x0000000000000400UL
314#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 35 311#define UV2H_EVENT_OCCURRED0_RH_AOERR0_MASK 0x0000000000000800UL
315#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK 0x0000000800000000UL 312#define UV2H_EVENT_OCCURRED0_LH0_AOERR0_MASK 0x0000000000001000UL
316#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 36 313#define UV2H_EVENT_OCCURRED0_LH1_AOERR0_MASK 0x0000000000002000UL
317#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK 0x0000001000000000UL 314#define UV2H_EVENT_OCCURRED0_GR0_AOERR0_MASK 0x0000000000004000UL
318#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 37 315#define UV2H_EVENT_OCCURRED0_GR1_AOERR0_MASK 0x0000000000008000UL
319#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK 0x0000002000000000UL 316#define UV2H_EVENT_OCCURRED0_XB_AOERR0_MASK 0x0000000000010000UL
320#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 38 317#define UV2H_EVENT_OCCURRED0_RT_AOERR0_MASK 0x0000000000020000UL
321#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK 0x0000004000000000UL 318#define UV2H_EVENT_OCCURRED0_NI0_AOERR0_MASK 0x0000000000040000UL
322#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 39 319#define UV2H_EVENT_OCCURRED0_NI1_AOERR0_MASK 0x0000000000080000UL
323#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK 0x0000008000000000UL 320#define UV2H_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000100000UL
324#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 40 321#define UV2H_EVENT_OCCURRED0_QP_AOERR1_MASK 0x0000000000200000UL
325#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK 0x0000010000000000UL 322#define UV2H_EVENT_OCCURRED0_RH_AOERR1_MASK 0x0000000000400000UL
326#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 41 323#define UV2H_EVENT_OCCURRED0_LH0_AOERR1_MASK 0x0000000000800000UL
327#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK 0x0000020000000000UL 324#define UV2H_EVENT_OCCURRED0_LH1_AOERR1_MASK 0x0000000001000000UL
328#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 42 325#define UV2H_EVENT_OCCURRED0_GR0_AOERR1_MASK 0x0000000002000000UL
329#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK 0x0000040000000000UL 326#define UV2H_EVENT_OCCURRED0_GR1_AOERR1_MASK 0x0000000004000000UL
330#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 43 327#define UV2H_EVENT_OCCURRED0_XB_AOERR1_MASK 0x0000000008000000UL
331#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK 0x0000080000000000UL 328#define UV2H_EVENT_OCCURRED0_RT_AOERR1_MASK 0x0000000010000000UL
332#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 44 329#define UV2H_EVENT_OCCURRED0_NI0_AOERR1_MASK 0x0000000020000000UL
333#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK 0x0000100000000000UL 330#define UV2H_EVENT_OCCURRED0_NI1_AOERR1_MASK 0x0000000040000000UL
334#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 45 331#define UV2H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK 0x0000000080000000UL
335#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK 0x0000200000000000UL 332#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK 0x0000000100000000UL
336#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 46 333#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK 0x0000000200000000UL
337#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK 0x0000400000000000UL 334#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK 0x0000000400000000UL
338#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 47 335#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK 0x0000000800000000UL
339#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK 0x0000800000000000UL 336#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK 0x0000001000000000UL
340#define UV2H_EVENT_OCCURRED0_L1_NMI_INT_SHFT 48 337#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK 0x0000002000000000UL
341#define UV2H_EVENT_OCCURRED0_L1_NMI_INT_MASK 0x0001000000000000UL 338#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK 0x0000004000000000UL
342#define UV2H_EVENT_OCCURRED0_STOP_CLOCK_SHFT 49 339#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK 0x0000008000000000UL
343#define UV2H_EVENT_OCCURRED0_STOP_CLOCK_MASK 0x0002000000000000UL 340#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK 0x0000010000000000UL
344#define UV2H_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 50 341#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK 0x0000020000000000UL
345#define UV2H_EVENT_OCCURRED0_ASIC_TO_L1_MASK 0x0004000000000000UL 342#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK 0x0000040000000000UL
346#define UV2H_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 51 343#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK 0x0000080000000000UL
347#define UV2H_EVENT_OCCURRED0_L1_TO_ASIC_MASK 0x0008000000000000UL 344#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK 0x0000100000000000UL
348#define UV2H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 52 345#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK 0x0000200000000000UL
349#define UV2H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK 0x0010000000000000UL 346#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK 0x0000400000000000UL
350#define UV2H_EVENT_OCCURRED0_IPI_INT_SHFT 53 347#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK 0x0000800000000000UL
351#define UV2H_EVENT_OCCURRED0_IPI_INT_MASK 0x0020000000000000UL 348#define UV2H_EVENT_OCCURRED0_L1_NMI_INT_MASK 0x0001000000000000UL
352#define UV2H_EVENT_OCCURRED0_EXTIO_INT0_SHFT 54 349#define UV2H_EVENT_OCCURRED0_STOP_CLOCK_MASK 0x0002000000000000UL
353#define UV2H_EVENT_OCCURRED0_EXTIO_INT0_MASK 0x0040000000000000UL 350#define UV2H_EVENT_OCCURRED0_ASIC_TO_L1_MASK 0x0004000000000000UL
354#define UV2H_EVENT_OCCURRED0_EXTIO_INT1_SHFT 55 351#define UV2H_EVENT_OCCURRED0_L1_TO_ASIC_MASK 0x0008000000000000UL
355#define UV2H_EVENT_OCCURRED0_EXTIO_INT1_MASK 0x0080000000000000UL 352#define UV2H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK 0x0010000000000000UL
356#define UV2H_EVENT_OCCURRED0_EXTIO_INT2_SHFT 56 353#define UV2H_EVENT_OCCURRED0_IPI_INT_MASK 0x0020000000000000UL
357#define UV2H_EVENT_OCCURRED0_EXTIO_INT2_MASK 0x0100000000000000UL 354#define UV2H_EVENT_OCCURRED0_EXTIO_INT0_MASK 0x0040000000000000UL
358#define UV2H_EVENT_OCCURRED0_EXTIO_INT3_SHFT 57 355#define UV2H_EVENT_OCCURRED0_EXTIO_INT1_MASK 0x0080000000000000UL
359#define UV2H_EVENT_OCCURRED0_EXTIO_INT3_MASK 0x0200000000000000UL 356#define UV2H_EVENT_OCCURRED0_EXTIO_INT2_MASK 0x0100000000000000UL
360#define UV2H_EVENT_OCCURRED0_PROFILE_INT_SHFT 58 357#define UV2H_EVENT_OCCURRED0_EXTIO_INT3_MASK 0x0200000000000000UL
361#define UV2H_EVENT_OCCURRED0_PROFILE_INT_MASK 0x0400000000000000UL 358#define UV2H_EVENT_OCCURRED0_PROFILE_INT_MASK 0x0400000000000000UL
362 359
/*
 * EVENT_OCCURRED0 MMR overlay: raw 64-bit register value (v) plus
 * per-hub-generation bitfield views (s1 = UV1 hub, s2 = UV2 hub).
 * Field annotations follow the register listing above: RW = read/write;
 * W1C presumably means write-1-to-clear -- confirm against hub docs.
 */
union uvh_event_occurred0_u {
	unsigned long	v;
	struct uv1h_event_occurred0_s {
		unsigned long	lb_hcerr:1;			/* RW, W1C */
		unsigned long	gr0_hcerr:1;			/* RW, W1C */
		unsigned long	gr1_hcerr:1;			/* RW, W1C */
		unsigned long	lh_hcerr:1;			/* RW, W1C */
		unsigned long	rh_hcerr:1;			/* RW, W1C */
		unsigned long	xn_hcerr:1;			/* RW, W1C */
		unsigned long	si_hcerr:1;			/* RW, W1C */
		unsigned long	lb_aoerr0:1;			/* RW, W1C */
		unsigned long	gr0_aoerr0:1;			/* RW, W1C */
		unsigned long	gr1_aoerr0:1;			/* RW, W1C */
		unsigned long	lh_aoerr0:1;			/* RW, W1C */
		unsigned long	rh_aoerr0:1;			/* RW, W1C */
		unsigned long	xn_aoerr0:1;			/* RW, W1C */
		unsigned long	si_aoerr0:1;			/* RW, W1C */
		unsigned long	lb_aoerr1:1;			/* RW, W1C */
		unsigned long	gr0_aoerr1:1;			/* RW, W1C */
		unsigned long	gr1_aoerr1:1;			/* RW, W1C */
		unsigned long	lh_aoerr1:1;			/* RW, W1C */
		unsigned long	rh_aoerr1:1;			/* RW, W1C */
		unsigned long	xn_aoerr1:1;			/* RW, W1C */
		unsigned long	si_aoerr1:1;			/* RW, W1C */
		unsigned long	rh_vpi_int:1;			/* RW, W1C */
		unsigned long	system_shutdown_int:1;		/* RW, W1C */
		unsigned long	lb_irq_int_0:1;			/* RW, W1C */
		unsigned long	lb_irq_int_1:1;			/* RW, W1C */
		unsigned long	lb_irq_int_2:1;			/* RW, W1C */
		unsigned long	lb_irq_int_3:1;			/* RW, W1C */
		unsigned long	lb_irq_int_4:1;			/* RW, W1C */
		unsigned long	lb_irq_int_5:1;			/* RW, W1C */
		unsigned long	lb_irq_int_6:1;			/* RW, W1C */
		unsigned long	lb_irq_int_7:1;			/* RW, W1C */
		unsigned long	lb_irq_int_8:1;			/* RW, W1C */
		unsigned long	lb_irq_int_9:1;			/* RW, W1C */
		unsigned long	lb_irq_int_10:1;		/* RW, W1C */
		unsigned long	lb_irq_int_11:1;		/* RW, W1C */
		unsigned long	lb_irq_int_12:1;		/* RW, W1C */
		unsigned long	lb_irq_int_13:1;		/* RW, W1C */
		unsigned long	lb_irq_int_14:1;		/* RW, W1C */
		unsigned long	lb_irq_int_15:1;		/* RW, W1C */
		unsigned long	l1_nmi_int:1;			/* RW, W1C */
		unsigned long	stop_clock:1;			/* RW, W1C */
		unsigned long	asic_to_l1:1;			/* RW, W1C */
		unsigned long	l1_to_asic:1;			/* RW, W1C */
		unsigned long	ltc_int:1;			/* RW, W1C */
		unsigned long	la_seq_trigger:1;		/* RW, W1C */
		unsigned long	ipi_int:1;			/* RW, W1C */
		unsigned long	extio_int0:1;			/* RW, W1C */
		unsigned long	extio_int1:1;			/* RW, W1C */
		unsigned long	extio_int2:1;			/* RW, W1C */
		unsigned long	extio_int3:1;			/* RW, W1C */
		unsigned long	profile_int:1;			/* RW, W1C */
		unsigned long	rtc0:1;				/* RW, W1C */
		unsigned long	rtc1:1;				/* RW, W1C */
		unsigned long	rtc2:1;				/* RW, W1C */
		unsigned long	rtc3:1;				/* RW, W1C */
		unsigned long	bau_data:1;			/* RW, W1C */
		unsigned long	power_management_req:1;		/* RW, W1C */
		unsigned long	rsvd_57_63:7;	/* pad to 64 bits */
	} s1;
	struct uv2h_event_occurred0_s {
		unsigned long	lb_hcerr:1;			/* RW */
		unsigned long	qp_hcerr:1;			/* RW */
		unsigned long	rh_hcerr:1;			/* RW */
		unsigned long	lh0_hcerr:1;			/* RW */
		unsigned long	lh1_hcerr:1;			/* RW */
		unsigned long	gr0_hcerr:1;			/* RW */
		unsigned long	gr1_hcerr:1;			/* RW */
		unsigned long	ni0_hcerr:1;			/* RW */
		unsigned long	ni1_hcerr:1;			/* RW */
		unsigned long	lb_aoerr0:1;			/* RW */
		unsigned long	qp_aoerr0:1;			/* RW */
		unsigned long	rh_aoerr0:1;			/* RW */
		unsigned long	lh0_aoerr0:1;			/* RW */
		unsigned long	lh1_aoerr0:1;			/* RW */
		unsigned long	gr0_aoerr0:1;			/* RW */
		unsigned long	gr1_aoerr0:1;			/* RW */
		unsigned long	xb_aoerr0:1;			/* RW */
		unsigned long	rt_aoerr0:1;			/* RW */
		unsigned long	ni0_aoerr0:1;			/* RW */
		unsigned long	ni1_aoerr0:1;			/* RW */
		unsigned long	lb_aoerr1:1;			/* RW */
		unsigned long	qp_aoerr1:1;			/* RW */
		unsigned long	rh_aoerr1:1;			/* RW */
		unsigned long	lh0_aoerr1:1;			/* RW */
		unsigned long	lh1_aoerr1:1;			/* RW */
		unsigned long	gr0_aoerr1:1;			/* RW */
		unsigned long	gr1_aoerr1:1;			/* RW */
		unsigned long	xb_aoerr1:1;			/* RW */
		unsigned long	rt_aoerr1:1;			/* RW */
		unsigned long	ni0_aoerr1:1;			/* RW */
		unsigned long	ni1_aoerr1:1;			/* RW */
		unsigned long	system_shutdown_int:1;		/* RW */
		unsigned long	lb_irq_int_0:1;			/* RW */
		unsigned long	lb_irq_int_1:1;			/* RW */
		unsigned long	lb_irq_int_2:1;			/* RW */
		unsigned long	lb_irq_int_3:1;			/* RW */
		unsigned long	lb_irq_int_4:1;			/* RW */
		unsigned long	lb_irq_int_5:1;			/* RW */
		unsigned long	lb_irq_int_6:1;			/* RW */
		unsigned long	lb_irq_int_7:1;			/* RW */
		unsigned long	lb_irq_int_8:1;			/* RW */
		unsigned long	lb_irq_int_9:1;			/* RW */
		unsigned long	lb_irq_int_10:1;		/* RW */
		unsigned long	lb_irq_int_11:1;		/* RW */
		unsigned long	lb_irq_int_12:1;		/* RW */
		unsigned long	lb_irq_int_13:1;		/* RW */
		unsigned long	lb_irq_int_14:1;		/* RW */
		unsigned long	lb_irq_int_15:1;		/* RW */
		unsigned long	l1_nmi_int:1;			/* RW */
		unsigned long	stop_clock:1;			/* RW */
		unsigned long	asic_to_l1:1;			/* RW */
		unsigned long	l1_to_asic:1;			/* RW */
		unsigned long	la_seq_trigger:1;		/* RW */
		unsigned long	ipi_int:1;			/* RW */
		unsigned long	extio_int0:1;			/* RW */
		unsigned long	extio_int1:1;			/* RW */
		unsigned long	extio_int2:1;			/* RW */
		unsigned long	extio_int3:1;			/* RW */
		unsigned long	profile_int:1;			/* RW */
		unsigned long	rsvd_59_63:5;	/* pad to 64 bits */
	} s2;
};
488 485
/* ========================================================================= */
/*                        UVH_EVENT_OCCURRED0_ALIAS                          */
/* ========================================================================= */
/* Alias address of EVENT_OCCURRED0 (per the listing; presumably the
 * write-1-to-clear alias -- confirm against hub documentation). */
#define UVH_EVENT_OCCURRED0_ALIAS 0x0000000000070008UL
#define UVH_EVENT_OCCURRED0_ALIAS_32 0x5f0
494 491
/* ========================================================================= */
/*                         UVH_GR0_TLB_INT0_CONFIG                           */
/* ========================================================================= */
#define UVH_GR0_TLB_INT0_CONFIG 0x61b00UL

/* Interrupt-config register layout; SHFT/MASK pairs mirror the bitfields
 * of uvh_gr0_tlb_int0_config_s below. */
#define UVH_GR0_TLB_INT0_CONFIG_VECTOR_SHFT		0
#define UVH_GR0_TLB_INT0_CONFIG_DM_SHFT			8
#define UVH_GR0_TLB_INT0_CONFIG_DESTMODE_SHFT		11
#define UVH_GR0_TLB_INT0_CONFIG_STATUS_SHFT		12
#define UVH_GR0_TLB_INT0_CONFIG_P_SHFT			13
#define UVH_GR0_TLB_INT0_CONFIG_T_SHFT			15
#define UVH_GR0_TLB_INT0_CONFIG_M_SHFT			16
#define UVH_GR0_TLB_INT0_CONFIG_APIC_ID_SHFT		32
#define UVH_GR0_TLB_INT0_CONFIG_VECTOR_MASK		0x00000000000000ffUL
#define UVH_GR0_TLB_INT0_CONFIG_DM_MASK			0x0000000000000700UL
#define UVH_GR0_TLB_INT0_CONFIG_DESTMODE_MASK		0x0000000000000800UL
#define UVH_GR0_TLB_INT0_CONFIG_STATUS_MASK		0x0000000000001000UL
#define UVH_GR0_TLB_INT0_CONFIG_P_MASK			0x0000000000002000UL
#define UVH_GR0_TLB_INT0_CONFIG_T_MASK			0x0000000000008000UL
#define UVH_GR0_TLB_INT0_CONFIG_M_MASK			0x0000000000010000UL
#define UVH_GR0_TLB_INT0_CONFIG_APIC_ID_MASK		0xffffffff00000000UL

union uvh_gr0_tlb_int0_config_u {
	unsigned long	v;
	struct uvh_gr0_tlb_int0_config_s {
		unsigned long	vector_:8;			/* RW */
		unsigned long	dm:3;				/* RW */
		unsigned long	destmode:1;			/* RW */
		unsigned long	status:1;			/* RO */
		unsigned long	p:1;				/* RO */
		unsigned long	rsvd_14:1;
		unsigned long	t:1;				/* RO */
		unsigned long	m:1;				/* RW */
		unsigned long	rsvd_17_31:15;
		unsigned long	apic_id:32;			/* RW */
	} s;
};
532 529
/* ========================================================================= */
/*                         UVH_GR0_TLB_INT1_CONFIG                           */
/* ========================================================================= */
#define UVH_GR0_TLB_INT1_CONFIG 0x61b40UL

/* Same layout as UVH_GR0_TLB_INT0_CONFIG, at the INT1 register offset. */
#define UVH_GR0_TLB_INT1_CONFIG_VECTOR_SHFT		0
#define UVH_GR0_TLB_INT1_CONFIG_DM_SHFT			8
#define UVH_GR0_TLB_INT1_CONFIG_DESTMODE_SHFT		11
#define UVH_GR0_TLB_INT1_CONFIG_STATUS_SHFT		12
#define UVH_GR0_TLB_INT1_CONFIG_P_SHFT			13
#define UVH_GR0_TLB_INT1_CONFIG_T_SHFT			15
#define UVH_GR0_TLB_INT1_CONFIG_M_SHFT			16
#define UVH_GR0_TLB_INT1_CONFIG_APIC_ID_SHFT		32
#define UVH_GR0_TLB_INT1_CONFIG_VECTOR_MASK		0x00000000000000ffUL
#define UVH_GR0_TLB_INT1_CONFIG_DM_MASK			0x0000000000000700UL
#define UVH_GR0_TLB_INT1_CONFIG_DESTMODE_MASK		0x0000000000000800UL
#define UVH_GR0_TLB_INT1_CONFIG_STATUS_MASK		0x0000000000001000UL
#define UVH_GR0_TLB_INT1_CONFIG_P_MASK			0x0000000000002000UL
#define UVH_GR0_TLB_INT1_CONFIG_T_MASK			0x0000000000008000UL
#define UVH_GR0_TLB_INT1_CONFIG_M_MASK			0x0000000000010000UL
#define UVH_GR0_TLB_INT1_CONFIG_APIC_ID_MASK		0xffffffff00000000UL

union uvh_gr0_tlb_int1_config_u {
	unsigned long	v;
	struct uvh_gr0_tlb_int1_config_s {
		unsigned long	vector_:8;			/* RW */
		unsigned long	dm:3;				/* RW */
		unsigned long	destmode:1;			/* RW */
		unsigned long	status:1;			/* RO */
		unsigned long	p:1;				/* RO */
		unsigned long	rsvd_14:1;
		unsigned long	t:1;				/* RO */
		unsigned long	m:1;				/* RW */
		unsigned long	rsvd_17_31:15;
		unsigned long	apic_id:32;			/* RW */
	} s;
};
567
/* ========================================================================= */
/*                         UVH_GR0_TLB_MMR_CONTROL                           */
/* ========================================================================= */
/* Register offset differs per hub generation; the UVH_ macro selects at
 * runtime via is_uv1_hub(). */
#define UV1H_GR0_TLB_MMR_CONTROL 0x401080UL
#define UV2H_GR0_TLB_MMR_CONTROL 0xc01080UL
#define UVH_GR0_TLB_MMR_CONTROL (is_uv1_hub() ?				\
		UV1H_GR0_TLB_MMR_CONTROL :				\
		UV2H_GR0_TLB_MMR_CONTROL)

/* Fields common to both hub generations. */
#define UVH_GR0_TLB_MMR_CONTROL_INDEX_SHFT		0
#define UVH_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT		12
#define UVH_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT	16
#define UVH_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT	20
#define UVH_GR0_TLB_MMR_CONTROL_MMR_WRITE_SHFT		30
#define UVH_GR0_TLB_MMR_CONTROL_MMR_READ_SHFT		31
#define UVH_GR0_TLB_MMR_CONTROL_INDEX_MASK		0x0000000000000fffUL
#define UVH_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK		0x0000000000003000UL
#define UVH_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK	0x0000000000010000UL
#define UVH_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK	0x0000000000100000UL
#define UVH_GR0_TLB_MMR_CONTROL_MMR_WRITE_MASK		0x0000000040000000UL
#define UVH_GR0_TLB_MMR_CONTROL_MMR_READ_MASK		0x0000000080000000UL

/* UV1-only fields (error-injection controls). */
#define UV1H_GR0_TLB_MMR_CONTROL_INDEX_SHFT		0
#define UV1H_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT		12
#define UV1H_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT	16
#define UV1H_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT	20
#define UV1H_GR0_TLB_MMR_CONTROL_MMR_WRITE_SHFT		30
#define UV1H_GR0_TLB_MMR_CONTROL_MMR_READ_SHFT		31
#define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_CON_SHFT	48
#define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_SHFT	52
#define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBPGSIZE_SHFT	54
#define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBRREG_SHFT	56
#define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBLRUV_SHFT	60
#define UV1H_GR0_TLB_MMR_CONTROL_INDEX_MASK		0x0000000000000fffUL
#define UV1H_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK		0x0000000000003000UL
#define UV1H_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK	0x0000000000010000UL
#define UV1H_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK	0x0000000000100000UL
#define UV1H_GR0_TLB_MMR_CONTROL_MMR_WRITE_MASK		0x0000000040000000UL
#define UV1H_GR0_TLB_MMR_CONTROL_MMR_READ_MASK		0x0000000080000000UL
#define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_CON_MASK	0x0001000000000000UL
#define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_MASK	0x0010000000000000UL
#define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBPGSIZE_MASK	0x0040000000000000UL
#define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBRREG_MASK	0x0100000000000000UL
#define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBLRUV_MASK	0x1000000000000000UL

/* UV2-only fields (adds MMR_OP_DONE; fewer injection controls). */
#define UV2H_GR0_TLB_MMR_CONTROL_INDEX_SHFT		0
#define UV2H_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT		12
#define UV2H_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT	16
#define UV2H_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT	20
#define UV2H_GR0_TLB_MMR_CONTROL_MMR_WRITE_SHFT		30
#define UV2H_GR0_TLB_MMR_CONTROL_MMR_READ_SHFT		31
#define UV2H_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT	32
#define UV2H_GR0_TLB_MMR_CONTROL_MMR_INJ_CON_SHFT	48
#define UV2H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_SHFT	52
#define UV2H_GR0_TLB_MMR_CONTROL_INDEX_MASK		0x0000000000000fffUL
#define UV2H_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK		0x0000000000003000UL
#define UV2H_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK	0x0000000000010000UL
#define UV2H_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK	0x0000000000100000UL
#define UV2H_GR0_TLB_MMR_CONTROL_MMR_WRITE_MASK		0x0000000040000000UL
#define UV2H_GR0_TLB_MMR_CONTROL_MMR_READ_MASK		0x0000000080000000UL
#define UV2H_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_MASK	0x0000000100000000UL
#define UV2H_GR0_TLB_MMR_CONTROL_MMR_INJ_CON_MASK	0x0001000000000000UL
#define UV2H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_MASK	0x0010000000000000UL

/* Overlay: s = generation-independent subset, s1 = UV1, s2 = UV2. */
union uvh_gr0_tlb_mmr_control_u {
	unsigned long	v;
	struct uvh_gr0_tlb_mmr_control_s {
		unsigned long	index:12;			/* RW */
		unsigned long	mem_sel:2;			/* RW */
		unsigned long	rsvd_14_15:2;
		unsigned long	auto_valid_en:1;		/* RW */
		unsigned long	rsvd_17_19:3;
		unsigned long	mmr_hash_index_en:1;		/* RW */
		unsigned long	rsvd_21_29:9;
		unsigned long	mmr_write:1;			/* WP */
		unsigned long	mmr_read:1;			/* WP */
		unsigned long	rsvd_32_63:32;
	} s;
	struct uv1h_gr0_tlb_mmr_control_s {
		unsigned long	index:12;			/* RW */
		unsigned long	mem_sel:2;			/* RW */
		unsigned long	rsvd_14_15:2;
		unsigned long	auto_valid_en:1;		/* RW */
		unsigned long	rsvd_17_19:3;
		unsigned long	mmr_hash_index_en:1;		/* RW */
		unsigned long	rsvd_21_29:9;
		unsigned long	mmr_write:1;			/* WP */
		unsigned long	mmr_read:1;			/* WP */
		unsigned long	rsvd_32_47:16;
		unsigned long	mmr_inj_con:1;			/* RW */
		unsigned long	rsvd_49_51:3;
		unsigned long	mmr_inj_tlbram:1;		/* RW */
		unsigned long	rsvd_53:1;
		unsigned long	mmr_inj_tlbpgsize:1;		/* RW */
		unsigned long	rsvd_55:1;
		unsigned long	mmr_inj_tlbrreg:1;		/* RW */
		unsigned long	rsvd_57_59:3;
		unsigned long	mmr_inj_tlblruv:1;		/* RW */
		unsigned long	rsvd_61_63:3;
	} s1;
	struct uv2h_gr0_tlb_mmr_control_s {
		unsigned long	index:12;			/* RW */
		unsigned long	mem_sel:2;			/* RW */
		unsigned long	rsvd_14_15:2;
		unsigned long	auto_valid_en:1;		/* RW */
		unsigned long	rsvd_17_19:3;
		unsigned long	mmr_hash_index_en:1;		/* RW */
		unsigned long	rsvd_21_29:9;
		unsigned long	mmr_write:1;			/* WP */
		unsigned long	mmr_read:1;			/* WP */
		unsigned long	mmr_op_done:1;			/* RW */
		unsigned long	rsvd_33_47:15;
		unsigned long	mmr_inj_con:1;			/* RW */
		unsigned long	rsvd_49_51:3;
		unsigned long	mmr_inj_tlbram:1;		/* RW */
		unsigned long	rsvd_53_63:11;
	} s2;
};
686
/* ========================================================================= */
/*                       UVH_GR0_TLB_MMR_READ_DATA_HI                        */
/* ========================================================================= */
#define UV1H_GR0_TLB_MMR_READ_DATA_HI 0x4010a0UL
#define UV2H_GR0_TLB_MMR_READ_DATA_HI 0xc010a0UL
#define UVH_GR0_TLB_MMR_READ_DATA_HI (is_uv1_hub() ?			\
		UV1H_GR0_TLB_MMR_READ_DATA_HI :				\
		UV2H_GR0_TLB_MMR_READ_DATA_HI)

#define UVH_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT		0
#define UVH_GR0_TLB_MMR_READ_DATA_HI_GAA_SHFT		41
#define UVH_GR0_TLB_MMR_READ_DATA_HI_DIRTY_SHFT		43
#define UVH_GR0_TLB_MMR_READ_DATA_HI_LARGER_SHFT	44
#define UVH_GR0_TLB_MMR_READ_DATA_HI_PFN_MASK		0x000001ffffffffffUL
#define UVH_GR0_TLB_MMR_READ_DATA_HI_GAA_MASK		0x0000060000000000UL
#define UVH_GR0_TLB_MMR_READ_DATA_HI_DIRTY_MASK		0x0000080000000000UL
#define UVH_GR0_TLB_MMR_READ_DATA_HI_LARGER_MASK	0x0000100000000000UL

union uvh_gr0_tlb_mmr_read_data_hi_u {
	unsigned long	v;
	struct uvh_gr0_tlb_mmr_read_data_hi_s {
		unsigned long	pfn:41;				/* RO */
		unsigned long	gaa:2;				/* RO */
		unsigned long	dirty:1;			/* RO */
		unsigned long	larger:1;			/* RO */
		unsigned long	rsvd_45_63:19;
	} s;
};
715
/* ========================================================================= */
/*                       UVH_GR0_TLB_MMR_READ_DATA_LO                        */
/* ========================================================================= */
#define UV1H_GR0_TLB_MMR_READ_DATA_LO 0x4010a8UL
#define UV2H_GR0_TLB_MMR_READ_DATA_LO 0xc010a8UL
#define UVH_GR0_TLB_MMR_READ_DATA_LO (is_uv1_hub() ?			\
		UV1H_GR0_TLB_MMR_READ_DATA_LO :				\
		UV2H_GR0_TLB_MMR_READ_DATA_LO)

#define UVH_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT		0
#define UVH_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT		39
#define UVH_GR0_TLB_MMR_READ_DATA_LO_VALID_SHFT		63
#define UVH_GR0_TLB_MMR_READ_DATA_LO_VPN_MASK		0x0000007fffffffffUL
#define UVH_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK		0x7fffff8000000000UL
#define UVH_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK		0x8000000000000000UL

union uvh_gr0_tlb_mmr_read_data_lo_u {
	unsigned long	v;
	struct uvh_gr0_tlb_mmr_read_data_lo_s {
		unsigned long	vpn:39;				/* RO */
		unsigned long	asid:24;			/* RO */
		unsigned long	valid:1;			/* RO */
	} s;
};
570 740
/* ========================================================================= */
/*                         UVH_GR1_TLB_INT0_CONFIG                           */
/* ========================================================================= */
#define UVH_GR1_TLB_INT0_CONFIG 0x61f00UL

/* Same layout as the GR0 interrupt-config registers, for GR1. */
#define UVH_GR1_TLB_INT0_CONFIG_VECTOR_SHFT		0
#define UVH_GR1_TLB_INT0_CONFIG_DM_SHFT			8
#define UVH_GR1_TLB_INT0_CONFIG_DESTMODE_SHFT		11
#define UVH_GR1_TLB_INT0_CONFIG_STATUS_SHFT		12
#define UVH_GR1_TLB_INT0_CONFIG_P_SHFT			13
#define UVH_GR1_TLB_INT0_CONFIG_T_SHFT			15
#define UVH_GR1_TLB_INT0_CONFIG_M_SHFT			16
#define UVH_GR1_TLB_INT0_CONFIG_APIC_ID_SHFT		32
#define UVH_GR1_TLB_INT0_CONFIG_VECTOR_MASK		0x00000000000000ffUL
#define UVH_GR1_TLB_INT0_CONFIG_DM_MASK			0x0000000000000700UL
#define UVH_GR1_TLB_INT0_CONFIG_DESTMODE_MASK		0x0000000000000800UL
#define UVH_GR1_TLB_INT0_CONFIG_STATUS_MASK		0x0000000000001000UL
#define UVH_GR1_TLB_INT0_CONFIG_P_MASK			0x0000000000002000UL
#define UVH_GR1_TLB_INT0_CONFIG_T_MASK			0x0000000000008000UL
#define UVH_GR1_TLB_INT0_CONFIG_M_MASK			0x0000000000010000UL
#define UVH_GR1_TLB_INT0_CONFIG_APIC_ID_MASK		0xffffffff00000000UL

union uvh_gr1_tlb_int0_config_u {
	unsigned long	v;
	struct uvh_gr1_tlb_int0_config_s {
		unsigned long	vector_:8;			/* RW */
		unsigned long	dm:3;				/* RW */
		unsigned long	destmode:1;			/* RW */
		unsigned long	status:1;			/* RO */
		unsigned long	p:1;				/* RO */
		unsigned long	rsvd_14:1;
		unsigned long	t:1;				/* RO */
		unsigned long	m:1;				/* RW */
		unsigned long	rsvd_17_31:15;
		unsigned long	apic_id:32;			/* RW */
	} s;
};
608 778
/* ========================================================================= */
/*                         UVH_GR1_TLB_INT1_CONFIG                           */
/* ========================================================================= */
#define UVH_GR1_TLB_INT1_CONFIG 0x61f40UL

/* Same layout as UVH_GR1_TLB_INT0_CONFIG, at the INT1 register offset. */
#define UVH_GR1_TLB_INT1_CONFIG_VECTOR_SHFT		0
#define UVH_GR1_TLB_INT1_CONFIG_DM_SHFT			8
#define UVH_GR1_TLB_INT1_CONFIG_DESTMODE_SHFT		11
#define UVH_GR1_TLB_INT1_CONFIG_STATUS_SHFT		12
#define UVH_GR1_TLB_INT1_CONFIG_P_SHFT			13
#define UVH_GR1_TLB_INT1_CONFIG_T_SHFT			15
#define UVH_GR1_TLB_INT1_CONFIG_M_SHFT			16
#define UVH_GR1_TLB_INT1_CONFIG_APIC_ID_SHFT		32
#define UVH_GR1_TLB_INT1_CONFIG_VECTOR_MASK		0x00000000000000ffUL
#define UVH_GR1_TLB_INT1_CONFIG_DM_MASK			0x0000000000000700UL
#define UVH_GR1_TLB_INT1_CONFIG_DESTMODE_MASK		0x0000000000000800UL
#define UVH_GR1_TLB_INT1_CONFIG_STATUS_MASK		0x0000000000001000UL
#define UVH_GR1_TLB_INT1_CONFIG_P_MASK			0x0000000000002000UL
#define UVH_GR1_TLB_INT1_CONFIG_T_MASK			0x0000000000008000UL
#define UVH_GR1_TLB_INT1_CONFIG_M_MASK			0x0000000000010000UL
#define UVH_GR1_TLB_INT1_CONFIG_APIC_ID_MASK		0xffffffff00000000UL

union uvh_gr1_tlb_int1_config_u {
	unsigned long	v;
	struct uvh_gr1_tlb_int1_config_s {
		unsigned long	vector_:8;			/* RW */
		unsigned long	dm:3;				/* RW */
		unsigned long	destmode:1;			/* RW */
		unsigned long	status:1;			/* RO */
		unsigned long	p:1;				/* RO */
		unsigned long	rsvd_14:1;
		unsigned long	t:1;				/* RO */
		unsigned long	m:1;				/* RW */
		unsigned long	rsvd_17_31:15;
		unsigned long	apic_id:32;			/* RW */
	} s;
};
816
817/* ========================================================================= */
818/* UVH_GR1_TLB_MMR_CONTROL */
819/* ========================================================================= */
820#define UV1H_GR1_TLB_MMR_CONTROL 0x801080UL
821#define UV2H_GR1_TLB_MMR_CONTROL 0x1001080UL
822#define UVH_GR1_TLB_MMR_CONTROL (is_uv1_hub() ? \
823 UV1H_GR1_TLB_MMR_CONTROL : \
824 UV2H_GR1_TLB_MMR_CONTROL)
825
826#define UVH_GR1_TLB_MMR_CONTROL_INDEX_SHFT 0
827#define UVH_GR1_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
828#define UVH_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
829#define UVH_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
830#define UVH_GR1_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30
831#define UVH_GR1_TLB_MMR_CONTROL_MMR_READ_SHFT 31
832#define UVH_GR1_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000000fffUL
833#define UVH_GR1_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000003000UL
834#define UVH_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL
835#define UVH_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL
836#define UVH_GR1_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
837#define UVH_GR1_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
838
839#define UV1H_GR1_TLB_MMR_CONTROL_INDEX_SHFT 0
840#define UV1H_GR1_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
841#define UV1H_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
842#define UV1H_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
843#define UV1H_GR1_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30
844#define UV1H_GR1_TLB_MMR_CONTROL_MMR_READ_SHFT 31
845#define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_CON_SHFT 48
846#define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_SHFT 52
847#define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBPGSIZE_SHFT 54
848#define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBRREG_SHFT 56
849#define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBLRUV_SHFT 60
850#define UV1H_GR1_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000000fffUL
851#define UV1H_GR1_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000003000UL
852#define UV1H_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL
853#define UV1H_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL
854#define UV1H_GR1_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
855#define UV1H_GR1_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
856#define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_CON_MASK 0x0001000000000000UL
857#define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_MASK 0x0010000000000000UL
858#define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBPGSIZE_MASK 0x0040000000000000UL
859#define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBRREG_MASK 0x0100000000000000UL
860#define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBLRUV_MASK 0x1000000000000000UL
861
862#define UV2H_GR1_TLB_MMR_CONTROL_INDEX_SHFT 0
863#define UV2H_GR1_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
864#define UV2H_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
865#define UV2H_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
866#define UV2H_GR1_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30
867#define UV2H_GR1_TLB_MMR_CONTROL_MMR_READ_SHFT 31
868#define UV2H_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT 32
869#define UV2H_GR1_TLB_MMR_CONTROL_MMR_INJ_CON_SHFT 48
870#define UV2H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_SHFT 52
871#define UV2H_GR1_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000000fffUL
872#define UV2H_GR1_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000003000UL
873#define UV2H_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL
874#define UV2H_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL
875#define UV2H_GR1_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
876#define UV2H_GR1_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
877#define UV2H_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_MASK 0x0000000100000000UL
878#define UV2H_GR1_TLB_MMR_CONTROL_MMR_INJ_CON_MASK 0x0001000000000000UL
879#define UV2H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_MASK 0x0010000000000000UL
880
881union uvh_gr1_tlb_mmr_control_u {
882 unsigned long v;
883 struct uvh_gr1_tlb_mmr_control_s {
884 unsigned long index:12; /* RW */
885 unsigned long mem_sel:2; /* RW */
886 unsigned long rsvd_14_15:2;
887 unsigned long auto_valid_en:1; /* RW */
888 unsigned long rsvd_17_19:3;
889 unsigned long mmr_hash_index_en:1; /* RW */
890 unsigned long rsvd_21_29:9;
891 unsigned long mmr_write:1; /* WP */
892 unsigned long mmr_read:1; /* WP */
893 unsigned long rsvd_32_63:32;
894 } s;
895 struct uv1h_gr1_tlb_mmr_control_s {
896 unsigned long index:12; /* RW */
897 unsigned long mem_sel:2; /* RW */
898 unsigned long rsvd_14_15:2;
899 unsigned long auto_valid_en:1; /* RW */
900 unsigned long rsvd_17_19:3;
901 unsigned long mmr_hash_index_en:1; /* RW */
902 unsigned long rsvd_21_29:9;
903 unsigned long mmr_write:1; /* WP */
904 unsigned long mmr_read:1; /* WP */
905 unsigned long rsvd_32_47:16;
906 unsigned long mmr_inj_con:1; /* RW */
907 unsigned long rsvd_49_51:3;
908 unsigned long mmr_inj_tlbram:1; /* RW */
909 unsigned long rsvd_53:1;
910 unsigned long mmr_inj_tlbpgsize:1; /* RW */
911 unsigned long rsvd_55:1;
912 unsigned long mmr_inj_tlbrreg:1; /* RW */
913 unsigned long rsvd_57_59:3;
914 unsigned long mmr_inj_tlblruv:1; /* RW */
915 unsigned long rsvd_61_63:3;
916 } s1;
917 struct uv2h_gr1_tlb_mmr_control_s {
918 unsigned long index:12; /* RW */
919 unsigned long mem_sel:2; /* RW */
920 unsigned long rsvd_14_15:2;
921 unsigned long auto_valid_en:1; /* RW */
922 unsigned long rsvd_17_19:3;
923 unsigned long mmr_hash_index_en:1; /* RW */
924 unsigned long rsvd_21_29:9;
925 unsigned long mmr_write:1; /* WP */
926 unsigned long mmr_read:1; /* WP */
927 unsigned long mmr_op_done:1; /* RW */
928 unsigned long rsvd_33_47:15;
929 unsigned long mmr_inj_con:1; /* RW */
930 unsigned long rsvd_49_51:3;
931 unsigned long mmr_inj_tlbram:1; /* RW */
932 unsigned long rsvd_53_63:11;
933 } s2;
934};
935
936/* ========================================================================= */
937/* UVH_GR1_TLB_MMR_READ_DATA_HI */
938/* ========================================================================= */
939#define UV1H_GR1_TLB_MMR_READ_DATA_HI 0x8010a0UL
940#define UV2H_GR1_TLB_MMR_READ_DATA_HI 0x10010a0UL
941#define UVH_GR1_TLB_MMR_READ_DATA_HI (is_uv1_hub() ? \
942 UV1H_GR1_TLB_MMR_READ_DATA_HI : \
943 UV2H_GR1_TLB_MMR_READ_DATA_HI)
944
945#define UVH_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
946#define UVH_GR1_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
947#define UVH_GR1_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43
948#define UVH_GR1_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44
949#define UVH_GR1_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL
950#define UVH_GR1_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL
951#define UVH_GR1_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
952#define UVH_GR1_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
953
954union uvh_gr1_tlb_mmr_read_data_hi_u {
955 unsigned long v;
956 struct uvh_gr1_tlb_mmr_read_data_hi_s {
957 unsigned long pfn:41; /* RO */
958 unsigned long gaa:2; /* RO */
959 unsigned long dirty:1; /* RO */
960 unsigned long larger:1; /* RO */
961 unsigned long rsvd_45_63:19;
962 } s;
963};
964
965/* ========================================================================= */
966/* UVH_GR1_TLB_MMR_READ_DATA_LO */
967/* ========================================================================= */
968#define UV1H_GR1_TLB_MMR_READ_DATA_LO 0x8010a8UL
969#define UV2H_GR1_TLB_MMR_READ_DATA_LO 0x10010a8UL
970#define UVH_GR1_TLB_MMR_READ_DATA_LO (is_uv1_hub() ? \
971 UV1H_GR1_TLB_MMR_READ_DATA_LO : \
972 UV2H_GR1_TLB_MMR_READ_DATA_LO)
973
974#define UVH_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
975#define UVH_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
976#define UVH_GR1_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
977#define UVH_GR1_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
978#define UVH_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
979#define UVH_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
980
981union uvh_gr1_tlb_mmr_read_data_lo_u {
982 unsigned long v;
983 struct uvh_gr1_tlb_mmr_read_data_lo_s {
984 unsigned long vpn:39; /* RO */
985 unsigned long asid:24; /* RO */
986 unsigned long valid:1; /* RO */
987 } s;
645}; 988};
646 989
647/* ========================================================================= */ 990/* ========================================================================= */
648/* UVH_INT_CMPB */ 991/* UVH_INT_CMPB */
649/* ========================================================================= */ 992/* ========================================================================= */
650#define UVH_INT_CMPB 0x22080UL 993#define UVH_INT_CMPB 0x22080UL
651 994
652#define UVH_INT_CMPB_REAL_TIME_CMPB_SHFT 0 995#define UVH_INT_CMPB_REAL_TIME_CMPB_SHFT 0
653#define UVH_INT_CMPB_REAL_TIME_CMPB_MASK 0x00ffffffffffffffUL 996#define UVH_INT_CMPB_REAL_TIME_CMPB_MASK 0x00ffffffffffffffUL
654 997
655union uvh_int_cmpb_u { 998union uvh_int_cmpb_u {
656 unsigned long v; 999 unsigned long v;
657 struct uvh_int_cmpb_s { 1000 struct uvh_int_cmpb_s {
658 unsigned long real_time_cmpb : 56; /* RW */ 1001 unsigned long real_time_cmpb:56; /* RW */
659 unsigned long rsvd_56_63 : 8; /* */ 1002 unsigned long rsvd_56_63:8;
660 } s; 1003 } s;
661}; 1004};
662 1005
663/* ========================================================================= */ 1006/* ========================================================================= */
664/* UVH_INT_CMPC */ 1007/* UVH_INT_CMPC */
665/* ========================================================================= */ 1008/* ========================================================================= */
666#define UVH_INT_CMPC 0x22100UL 1009#define UVH_INT_CMPC 0x22100UL
667 1010
668#define UV1H_INT_CMPC_REAL_TIME_CMPC_SHFT 0 1011#define UVH_INT_CMPC_REAL_TIME_CMPC_SHFT 0
669#define UV2H_INT_CMPC_REAL_TIME_CMPC_SHFT 0 1012#define UVH_INT_CMPC_REAL_TIME_CMPC_MASK 0xffffffffffffffUL
670#define UVH_INT_CMPC_REAL_TIME_CMPC_SHFT (is_uv1_hub() ? \
671 UV1H_INT_CMPC_REAL_TIME_CMPC_SHFT : \
672 UV2H_INT_CMPC_REAL_TIME_CMPC_SHFT)
673#define UV1H_INT_CMPC_REAL_TIME_CMPC_MASK 0xffffffffffffffUL
674#define UV2H_INT_CMPC_REAL_TIME_CMPC_MASK 0xffffffffffffffUL
675#define UVH_INT_CMPC_REAL_TIME_CMPC_MASK (is_uv1_hub() ? \
676 UV1H_INT_CMPC_REAL_TIME_CMPC_MASK : \
677 UV2H_INT_CMPC_REAL_TIME_CMPC_MASK)
678 1013
679union uvh_int_cmpc_u { 1014union uvh_int_cmpc_u {
680 unsigned long v; 1015 unsigned long v;
681 struct uvh_int_cmpc_s { 1016 struct uvh_int_cmpc_s {
682 unsigned long real_time_cmpc : 56; /* RW */ 1017 unsigned long real_time_cmpc:56; /* RW */
683 unsigned long rsvd_56_63 : 8; /* */ 1018 unsigned long rsvd_56_63:8;
684 } s; 1019 } s;
685}; 1020};
686 1021
687/* ========================================================================= */ 1022/* ========================================================================= */
688/* UVH_INT_CMPD */ 1023/* UVH_INT_CMPD */
689/* ========================================================================= */ 1024/* ========================================================================= */
690#define UVH_INT_CMPD 0x22180UL 1025#define UVH_INT_CMPD 0x22180UL
691 1026
692#define UV1H_INT_CMPD_REAL_TIME_CMPD_SHFT 0 1027#define UVH_INT_CMPD_REAL_TIME_CMPD_SHFT 0
693#define UV2H_INT_CMPD_REAL_TIME_CMPD_SHFT 0 1028#define UVH_INT_CMPD_REAL_TIME_CMPD_MASK 0xffffffffffffffUL
694#define UVH_INT_CMPD_REAL_TIME_CMPD_SHFT (is_uv1_hub() ? \
695 UV1H_INT_CMPD_REAL_TIME_CMPD_SHFT : \
696 UV2H_INT_CMPD_REAL_TIME_CMPD_SHFT)
697#define UV1H_INT_CMPD_REAL_TIME_CMPD_MASK 0xffffffffffffffUL
698#define UV2H_INT_CMPD_REAL_TIME_CMPD_MASK 0xffffffffffffffUL
699#define UVH_INT_CMPD_REAL_TIME_CMPD_MASK (is_uv1_hub() ? \
700 UV1H_INT_CMPD_REAL_TIME_CMPD_MASK : \
701 UV2H_INT_CMPD_REAL_TIME_CMPD_MASK)
702 1029
703union uvh_int_cmpd_u { 1030union uvh_int_cmpd_u {
704 unsigned long v; 1031 unsigned long v;
705 struct uvh_int_cmpd_s { 1032 struct uvh_int_cmpd_s {
706 unsigned long real_time_cmpd : 56; /* RW */ 1033 unsigned long real_time_cmpd:56; /* RW */
707 unsigned long rsvd_56_63 : 8; /* */ 1034 unsigned long rsvd_56_63:8;
708 } s; 1035 } s;
709}; 1036};
710 1037
711/* ========================================================================= */ 1038/* ========================================================================= */
712/* UVH_IPI_INT */ 1039/* UVH_IPI_INT */
713/* ========================================================================= */ 1040/* ========================================================================= */
714#define UVH_IPI_INT 0x60500UL 1041#define UVH_IPI_INT 0x60500UL
715#define UVH_IPI_INT_32 0x348 1042#define UVH_IPI_INT_32 0x348
716 1043
717#define UVH_IPI_INT_VECTOR_SHFT 0 1044#define UVH_IPI_INT_VECTOR_SHFT 0
718#define UVH_IPI_INT_VECTOR_MASK 0x00000000000000ffUL 1045#define UVH_IPI_INT_DELIVERY_MODE_SHFT 8
719#define UVH_IPI_INT_DELIVERY_MODE_SHFT 8 1046#define UVH_IPI_INT_DESTMODE_SHFT 11
720#define UVH_IPI_INT_DELIVERY_MODE_MASK 0x0000000000000700UL 1047#define UVH_IPI_INT_APIC_ID_SHFT 16
721#define UVH_IPI_INT_DESTMODE_SHFT 11 1048#define UVH_IPI_INT_SEND_SHFT 63
722#define UVH_IPI_INT_DESTMODE_MASK 0x0000000000000800UL 1049#define UVH_IPI_INT_VECTOR_MASK 0x00000000000000ffUL
723#define UVH_IPI_INT_APIC_ID_SHFT 16 1050#define UVH_IPI_INT_DELIVERY_MODE_MASK 0x0000000000000700UL
724#define UVH_IPI_INT_APIC_ID_MASK 0x0000ffffffff0000UL 1051#define UVH_IPI_INT_DESTMODE_MASK 0x0000000000000800UL
725#define UVH_IPI_INT_SEND_SHFT 63 1052#define UVH_IPI_INT_APIC_ID_MASK 0x0000ffffffff0000UL
726#define UVH_IPI_INT_SEND_MASK 0x8000000000000000UL 1053#define UVH_IPI_INT_SEND_MASK 0x8000000000000000UL
727 1054
728union uvh_ipi_int_u { 1055union uvh_ipi_int_u {
729 unsigned long v; 1056 unsigned long v;
730 struct uvh_ipi_int_s { 1057 struct uvh_ipi_int_s {
731 unsigned long vector_ : 8; /* RW */ 1058 unsigned long vector_:8; /* RW */
732 unsigned long delivery_mode : 3; /* RW */ 1059 unsigned long delivery_mode:3; /* RW */
733 unsigned long destmode : 1; /* RW */ 1060 unsigned long destmode:1; /* RW */
734 unsigned long rsvd_12_15 : 4; /* */ 1061 unsigned long rsvd_12_15:4;
735 unsigned long apic_id : 32; /* RW */ 1062 unsigned long apic_id:32; /* RW */
736 unsigned long rsvd_48_62 : 15; /* */ 1063 unsigned long rsvd_48_62:15;
737 unsigned long send : 1; /* WP */ 1064 unsigned long send:1; /* WP */
738 } s; 1065 } s;
739}; 1066};
740 1067
741/* ========================================================================= */ 1068/* ========================================================================= */
742/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST */ 1069/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST */
743/* ========================================================================= */ 1070/* ========================================================================= */
744#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST 0x320050UL 1071#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST 0x320050UL
745#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_32 0x9c0 1072#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_32 0x9c0
746 1073
747#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_SHFT 4 1074#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_SHFT 4
748#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_MASK 0x000007fffffffff0UL
749#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_NODE_ID_SHFT 49 1075#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_NODE_ID_SHFT 49
1076#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_MASK 0x000007fffffffff0UL
750#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_NODE_ID_MASK 0x7ffe000000000000UL 1077#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_NODE_ID_MASK 0x7ffe000000000000UL
751 1078
752union uvh_lb_bau_intd_payload_queue_first_u { 1079union uvh_lb_bau_intd_payload_queue_first_u {
753 unsigned long v; 1080 unsigned long v;
754 struct uvh_lb_bau_intd_payload_queue_first_s { 1081 struct uvh_lb_bau_intd_payload_queue_first_s {
755 unsigned long rsvd_0_3: 4; /* */ 1082 unsigned long rsvd_0_3:4;
756 unsigned long address : 39; /* RW */ 1083 unsigned long address:39; /* RW */
757 unsigned long rsvd_43_48: 6; /* */ 1084 unsigned long rsvd_43_48:6;
758 unsigned long node_id : 14; /* RW */ 1085 unsigned long node_id:14; /* RW */
759 unsigned long rsvd_63 : 1; /* */ 1086 unsigned long rsvd_63:1;
760 } s; 1087 } s;
761}; 1088};
762 1089
763/* ========================================================================= */ 1090/* ========================================================================= */
764/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST */ 1091/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST */
765/* ========================================================================= */ 1092/* ========================================================================= */
766#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST 0x320060UL 1093#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST 0x320060UL
767#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_32 0x9c8 1094#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_32 0x9c8
768 1095
769#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_SHFT 4 1096#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_SHFT 4
770#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_MASK 0x000007fffffffff0UL 1097#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_MASK 0x000007fffffffff0UL
771 1098
772union uvh_lb_bau_intd_payload_queue_last_u { 1099union uvh_lb_bau_intd_payload_queue_last_u {
773 unsigned long v; 1100 unsigned long v;
774 struct uvh_lb_bau_intd_payload_queue_last_s { 1101 struct uvh_lb_bau_intd_payload_queue_last_s {
775 unsigned long rsvd_0_3: 4; /* */ 1102 unsigned long rsvd_0_3:4;
776 unsigned long address : 39; /* RW */ 1103 unsigned long address:39; /* RW */
777 unsigned long rsvd_43_63: 21; /* */ 1104 unsigned long rsvd_43_63:21;
778 } s; 1105 } s;
779}; 1106};
780 1107
781/* ========================================================================= */ 1108/* ========================================================================= */
782/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL */ 1109/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL */
783/* ========================================================================= */ 1110/* ========================================================================= */
784#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL 0x320070UL 1111#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL 0x320070UL
785#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_32 0x9d0 1112#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_32 0x9d0
786 1113
787#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_SHFT 4 1114#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_SHFT 4
788#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_MASK 0x000007fffffffff0UL 1115#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_MASK 0x000007fffffffff0UL
789 1116
790union uvh_lb_bau_intd_payload_queue_tail_u { 1117union uvh_lb_bau_intd_payload_queue_tail_u {
791 unsigned long v; 1118 unsigned long v;
792 struct uvh_lb_bau_intd_payload_queue_tail_s { 1119 struct uvh_lb_bau_intd_payload_queue_tail_s {
793 unsigned long rsvd_0_3: 4; /* */ 1120 unsigned long rsvd_0_3:4;
794 unsigned long address : 39; /* RW */ 1121 unsigned long address:39; /* RW */
795 unsigned long rsvd_43_63: 21; /* */ 1122 unsigned long rsvd_43_63:21;
796 } s; 1123 } s;
797}; 1124};
798 1125
799/* ========================================================================= */ 1126/* ========================================================================= */
800/* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE */ 1127/* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE */
801/* ========================================================================= */ 1128/* ========================================================================= */
802#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE 0x320080UL 1129#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE 0x320080UL
803#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_32 0xa68 1130#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_32 0xa68
804 1131
805#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_SHFT 0 1132#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_SHFT 0
806#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_MASK 0x0000000000000001UL
807#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_1_SHFT 1 1133#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_1_SHFT 1
808#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_1_MASK 0x0000000000000002UL
809#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_2_SHFT 2 1134#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_2_SHFT 2
810#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_2_MASK 0x0000000000000004UL
811#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_3_SHFT 3 1135#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_3_SHFT 3
812#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_3_MASK 0x0000000000000008UL
813#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_4_SHFT 4 1136#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_4_SHFT 4
814#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_4_MASK 0x0000000000000010UL
815#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_5_SHFT 5 1137#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_5_SHFT 5
816#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_5_MASK 0x0000000000000020UL
817#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_6_SHFT 6 1138#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_6_SHFT 6
818#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_6_MASK 0x0000000000000040UL
819#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_7_SHFT 7 1139#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_7_SHFT 7
820#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_7_MASK 0x0000000000000080UL
821#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_0_SHFT 8 1140#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_0_SHFT 8
822#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_0_MASK 0x0000000000000100UL
823#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_1_SHFT 9 1141#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_1_SHFT 9
824#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_1_MASK 0x0000000000000200UL
825#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_2_SHFT 10 1142#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_2_SHFT 10
826#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_2_MASK 0x0000000000000400UL
827#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_3_SHFT 11 1143#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_3_SHFT 11
828#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_3_MASK 0x0000000000000800UL
829#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_4_SHFT 12 1144#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_4_SHFT 12
830#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_4_MASK 0x0000000000001000UL
831#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_5_SHFT 13 1145#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_5_SHFT 13
832#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_5_MASK 0x0000000000002000UL
833#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_6_SHFT 14 1146#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_6_SHFT 14
834#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_6_MASK 0x0000000000004000UL
835#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_7_SHFT 15 1147#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_7_SHFT 15
1148#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_MASK 0x0000000000000001UL
1149#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_1_MASK 0x0000000000000002UL
1150#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_2_MASK 0x0000000000000004UL
1151#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_3_MASK 0x0000000000000008UL
1152#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_4_MASK 0x0000000000000010UL
1153#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_5_MASK 0x0000000000000020UL
1154#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_6_MASK 0x0000000000000040UL
1155#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_7_MASK 0x0000000000000080UL
1156#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_0_MASK 0x0000000000000100UL
1157#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_1_MASK 0x0000000000000200UL
1158#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_2_MASK 0x0000000000000400UL
1159#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_3_MASK 0x0000000000000800UL
1160#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_4_MASK 0x0000000000001000UL
1161#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_5_MASK 0x0000000000002000UL
1162#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_6_MASK 0x0000000000004000UL
836#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_7_MASK 0x0000000000008000UL 1163#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_7_MASK 0x0000000000008000UL
837 1164
838union uvh_lb_bau_intd_software_acknowledge_u { 1165union uvh_lb_bau_intd_software_acknowledge_u {
839 unsigned long v; 1166 unsigned long v;
840 struct uvh_lb_bau_intd_software_acknowledge_s { 1167 struct uvh_lb_bau_intd_software_acknowledge_s {
841 unsigned long pending_0 : 1; /* RW, W1C */ 1168 unsigned long pending_0:1; /* RW, W1C */
842 unsigned long pending_1 : 1; /* RW, W1C */ 1169 unsigned long pending_1:1; /* RW, W1C */
843 unsigned long pending_2 : 1; /* RW, W1C */ 1170 unsigned long pending_2:1; /* RW, W1C */
844 unsigned long pending_3 : 1; /* RW, W1C */ 1171 unsigned long pending_3:1; /* RW, W1C */
845 unsigned long pending_4 : 1; /* RW, W1C */ 1172 unsigned long pending_4:1; /* RW, W1C */
846 unsigned long pending_5 : 1; /* RW, W1C */ 1173 unsigned long pending_5:1; /* RW, W1C */
847 unsigned long pending_6 : 1; /* RW, W1C */ 1174 unsigned long pending_6:1; /* RW, W1C */
848 unsigned long pending_7 : 1; /* RW, W1C */ 1175 unsigned long pending_7:1; /* RW, W1C */
849 unsigned long timeout_0 : 1; /* RW, W1C */ 1176 unsigned long timeout_0:1; /* RW, W1C */
850 unsigned long timeout_1 : 1; /* RW, W1C */ 1177 unsigned long timeout_1:1; /* RW, W1C */
851 unsigned long timeout_2 : 1; /* RW, W1C */ 1178 unsigned long timeout_2:1; /* RW, W1C */
852 unsigned long timeout_3 : 1; /* RW, W1C */ 1179 unsigned long timeout_3:1; /* RW, W1C */
853 unsigned long timeout_4 : 1; /* RW, W1C */ 1180 unsigned long timeout_4:1; /* RW, W1C */
854 unsigned long timeout_5 : 1; /* RW, W1C */ 1181 unsigned long timeout_5:1; /* RW, W1C */
855 unsigned long timeout_6 : 1; /* RW, W1C */ 1182 unsigned long timeout_6:1; /* RW, W1C */
856 unsigned long timeout_7 : 1; /* RW, W1C */ 1183 unsigned long timeout_7:1; /* RW, W1C */
857 unsigned long rsvd_16_63: 48; /* */ 1184 unsigned long rsvd_16_63:48;
858 } s; 1185 } s;
859}; 1186};
860 1187
861/* ========================================================================= */ 1188/* ========================================================================= */
862/* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS */ 1189/* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS */
863/* ========================================================================= */ 1190/* ========================================================================= */
864#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS 0x0000000000320088UL 1191#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS 0x0000000000320088UL
865#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS_32 0xa70 1192#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS_32 0xa70
866 1193
867/* ========================================================================= */ 1194/* ========================================================================= */
868/* UVH_LB_BAU_MISC_CONTROL */ 1195/* UVH_LB_BAU_MISC_CONTROL */
869/* ========================================================================= */ 1196/* ========================================================================= */
870#define UVH_LB_BAU_MISC_CONTROL 0x320170UL 1197#define UVH_LB_BAU_MISC_CONTROL 0x320170UL
871#define UVH_LB_BAU_MISC_CONTROL_32 0xa10 1198#define UVH_LB_BAU_MISC_CONTROL_32 0xa10
872 1199
873#define UVH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0 1200#define UVH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
874#define UVH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL 1201#define UVH_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8
875#define UVH_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8 1202#define UVH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9
876#define UVH_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL 1203#define UVH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10
877#define UVH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9
878#define UVH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL
879#define UVH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10
880#define UVH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL
881#define UVH_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_SHFT 11 1204#define UVH_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_SHFT 11
882#define UVH_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
883#define UVH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14 1205#define UVH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14
884#define UVH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL
885#define UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT 15 1206#define UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT 15
886#define UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK 0x0000000000008000UL
887#define UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT 16 1207#define UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT 16
888#define UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK 0x00000000000f0000UL
889#define UVH_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_SHFT 20 1208#define UVH_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_SHFT 20
890#define UVH_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL
891#define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_SHFT 21 1209#define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_SHFT 21
892#define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL
893#define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_SHFT 22 1210#define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_SHFT 22
894#define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL
895#define UVH_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_SHFT 23 1211#define UVH_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_SHFT 23
896#define UVH_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL
897#define UVH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24 1212#define UVH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24
898#define UVH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL
899#define UVH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27 1213#define UVH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27
900#define UVH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
901#define UVH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28 1214#define UVH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28
1215#define UVH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL
1216#define UVH_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL
1217#define UVH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL
1218#define UVH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL
1219#define UVH_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
1220#define UVH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL
1221#define UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK 0x0000000000008000UL
1222#define UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK 0x00000000000f0000UL
1223#define UVH_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL
1224#define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL
1225#define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL
1226#define UVH_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL
1227#define UVH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL
1228#define UVH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
902#define UVH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL 1229#define UVH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
903 1230
904#define UV1H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0 1231#define UV1H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
905#define UV1H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL 1232#define UV1H_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8
906#define UV1H_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8 1233#define UV1H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9
907#define UV1H_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL 1234#define UV1H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10
908#define UV1H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9
909#define UV1H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL
910#define UV1H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10
911#define UV1H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL
912#define UV1H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_SHFT 11 1235#define UV1H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_SHFT 11
913#define UV1H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
914#define UV1H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14 1236#define UV1H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14
915#define UV1H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL
916#define UV1H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT 15 1237#define UV1H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT 15
917#define UV1H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK 0x0000000000008000UL
918#define UV1H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT 16 1238#define UV1H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT 16
919#define UV1H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK 0x00000000000f0000UL
920#define UV1H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_SHFT 20 1239#define UV1H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_SHFT 20
921#define UV1H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL
922#define UV1H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_SHFT 21 1240#define UV1H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_SHFT 21
923#define UV1H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL
924#define UV1H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_SHFT 22 1241#define UV1H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_SHFT 22
925#define UV1H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL
926#define UV1H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_SHFT 23 1242#define UV1H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_SHFT 23
927#define UV1H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL
928#define UV1H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24 1243#define UV1H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24
929#define UV1H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL
930#define UV1H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27 1244#define UV1H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27
931#define UV1H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
932#define UV1H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28 1245#define UV1H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28
1246#define UV1H_LB_BAU_MISC_CONTROL_FUN_SHFT 48
1247#define UV1H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL
1248#define UV1H_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL
1249#define UV1H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL
1250#define UV1H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL
1251#define UV1H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
1252#define UV1H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL
1253#define UV1H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK 0x0000000000008000UL
1254#define UV1H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK 0x00000000000f0000UL
1255#define UV1H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL
1256#define UV1H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL
1257#define UV1H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL
1258#define UV1H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL
1259#define UV1H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL
1260#define UV1H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
933#define UV1H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL 1261#define UV1H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
934#define UV1H_LB_BAU_MISC_CONTROL_FUN_SHFT 48 1262#define UV1H_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
935#define UV1H_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL 1263
936 1264#define UV2H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
937#define UV2H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0 1265#define UV2H_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8
938#define UV2H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL 1266#define UV2H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9
939#define UV2H_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8 1267#define UV2H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10
940#define UV2H_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL
941#define UV2H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9
942#define UV2H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL
943#define UV2H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10
944#define UV2H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL
945#define UV2H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_SHFT 11 1268#define UV2H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_SHFT 11
946#define UV2H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
947#define UV2H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14 1269#define UV2H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14
948#define UV2H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL
949#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT 15 1270#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT 15
950#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK 0x0000000000008000UL
951#define UV2H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT 16 1271#define UV2H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT 16
952#define UV2H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK 0x00000000000f0000UL
953#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_SHFT 20 1272#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_SHFT 20
954#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL
955#define UV2H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_SHFT 21 1273#define UV2H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_SHFT 21
956#define UV2H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL
957#define UV2H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_SHFT 22 1274#define UV2H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_SHFT 22
958#define UV2H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL
959#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_SHFT 23 1275#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_SHFT 23
960#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL
961#define UV2H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24 1276#define UV2H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24
962#define UV2H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL
963#define UV2H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27 1277#define UV2H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27
964#define UV2H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
965#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28 1278#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28
966#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
967#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_SHFT 29 1279#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_SHFT 29
968#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_MASK 0x0000000020000000UL 1280#define UV2H_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_SHFT 30
969#define UV2H_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_SHFT 30
970#define UV2H_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_MASK 0x0000000040000000UL
971#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_SHFT 31 1281#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_SHFT 31
972#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_MASK 0x0000000080000000UL
973#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_SHFT 32 1282#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_SHFT 32
974#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_MASK 0x0000000100000000UL
975#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT 33 1283#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT 33
976#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_MASK 0x0000000200000000UL
977#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_SHFT 34 1284#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_SHFT 34
978#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_MASK 0x0000000400000000UL
979#define UV2H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_SHFT 35 1285#define UV2H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_SHFT 35
1286#define UV2H_LB_BAU_MISC_CONTROL_FUN_SHFT 48
1287#define UV2H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL
1288#define UV2H_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL
1289#define UV2H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL
1290#define UV2H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL
1291#define UV2H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
1292#define UV2H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL
1293#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK 0x0000000000008000UL
1294#define UV2H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK 0x00000000000f0000UL
1295#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL
1296#define UV2H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL
1297#define UV2H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL
1298#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL
1299#define UV2H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL
1300#define UV2H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
1301#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
1302#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_MASK 0x0000000020000000UL
1303#define UV2H_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_MASK 0x0000000040000000UL
1304#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_MASK 0x0000000080000000UL
1305#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_MASK 0x0000000100000000UL
1306#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_MASK 0x0000000200000000UL
1307#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_MASK 0x0000000400000000UL
980#define UV2H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_MASK 0x0000000800000000UL 1308#define UV2H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_MASK 0x0000000800000000UL
981#define UV2H_LB_BAU_MISC_CONTROL_FUN_SHFT 48 1309#define UV2H_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
982#define UV2H_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
983 1310
984union uvh_lb_bau_misc_control_u { 1311union uvh_lb_bau_misc_control_u {
985 unsigned long v; 1312 unsigned long v;
986 struct uvh_lb_bau_misc_control_s { 1313 struct uvh_lb_bau_misc_control_s {
987 unsigned long rejection_delay : 8; /* RW */ 1314 unsigned long rejection_delay:8; /* RW */
988 unsigned long apic_mode : 1; /* RW */ 1315 unsigned long apic_mode:1; /* RW */
989 unsigned long force_broadcast : 1; /* RW */ 1316 unsigned long force_broadcast:1; /* RW */
990 unsigned long force_lock_nop : 1; /* RW */ 1317 unsigned long force_lock_nop:1; /* RW */
991 unsigned long qpi_agent_presence_vector : 3; /* RW */ 1318 unsigned long qpi_agent_presence_vector:3; /* RW */
992 unsigned long descriptor_fetch_mode : 1; /* RW */ 1319 unsigned long descriptor_fetch_mode:1; /* RW */
993 unsigned long enable_intd_soft_ack_mode : 1; /* RW */ 1320 unsigned long enable_intd_soft_ack_mode:1; /* RW */
994 unsigned long intd_soft_ack_timeout_period : 4; /* RW */ 1321 unsigned long intd_soft_ack_timeout_period:4; /* RW */
995 unsigned long enable_dual_mapping_mode : 1; /* RW */ 1322 unsigned long enable_dual_mapping_mode:1; /* RW */
996 unsigned long vga_io_port_decode_enable : 1; /* RW */ 1323 unsigned long vga_io_port_decode_enable:1; /* RW */
997 unsigned long vga_io_port_16_bit_decode : 1; /* RW */ 1324 unsigned long vga_io_port_16_bit_decode:1; /* RW */
998 unsigned long suppress_dest_registration : 1; /* RW */ 1325 unsigned long suppress_dest_registration:1; /* RW */
999 unsigned long programmed_initial_priority : 3; /* RW */ 1326 unsigned long programmed_initial_priority:3; /* RW */
1000 unsigned long use_incoming_priority : 1; /* RW */ 1327 unsigned long use_incoming_priority:1; /* RW */
1001 unsigned long enable_programmed_initial_priority : 1; /* RW */ 1328 unsigned long enable_programmed_initial_priority:1;/* RW */
1002 unsigned long rsvd_29_63 : 35; 1329 unsigned long rsvd_29_63:35;
1003 } s; 1330 } s;
1004 struct uv1h_lb_bau_misc_control_s { 1331 struct uv1h_lb_bau_misc_control_s {
1005 unsigned long rejection_delay : 8; /* RW */ 1332 unsigned long rejection_delay:8; /* RW */
1006 unsigned long apic_mode : 1; /* RW */ 1333 unsigned long apic_mode:1; /* RW */
1007 unsigned long force_broadcast : 1; /* RW */ 1334 unsigned long force_broadcast:1; /* RW */
1008 unsigned long force_lock_nop : 1; /* RW */ 1335 unsigned long force_lock_nop:1; /* RW */
1009 unsigned long qpi_agent_presence_vector : 3; /* RW */ 1336 unsigned long qpi_agent_presence_vector:3; /* RW */
1010 unsigned long descriptor_fetch_mode : 1; /* RW */ 1337 unsigned long descriptor_fetch_mode:1; /* RW */
1011 unsigned long enable_intd_soft_ack_mode : 1; /* RW */ 1338 unsigned long enable_intd_soft_ack_mode:1; /* RW */
1012 unsigned long intd_soft_ack_timeout_period : 4; /* RW */ 1339 unsigned long intd_soft_ack_timeout_period:4; /* RW */
1013 unsigned long enable_dual_mapping_mode : 1; /* RW */ 1340 unsigned long enable_dual_mapping_mode:1; /* RW */
1014 unsigned long vga_io_port_decode_enable : 1; /* RW */ 1341 unsigned long vga_io_port_decode_enable:1; /* RW */
1015 unsigned long vga_io_port_16_bit_decode : 1; /* RW */ 1342 unsigned long vga_io_port_16_bit_decode:1; /* RW */
1016 unsigned long suppress_dest_registration : 1; /* RW */ 1343 unsigned long suppress_dest_registration:1; /* RW */
1017 unsigned long programmed_initial_priority : 3; /* RW */ 1344 unsigned long programmed_initial_priority:3; /* RW */
1018 unsigned long use_incoming_priority : 1; /* RW */ 1345 unsigned long use_incoming_priority:1; /* RW */
1019 unsigned long enable_programmed_initial_priority : 1; /* RW */ 1346 unsigned long enable_programmed_initial_priority:1;/* RW */
1020 unsigned long rsvd_29_47 : 19; /* */ 1347 unsigned long rsvd_29_47:19;
1021 unsigned long fun : 16; /* RW */ 1348 unsigned long fun:16; /* RW */
1022 } s1; 1349 } s1;
1023 struct uv2h_lb_bau_misc_control_s { 1350 struct uv2h_lb_bau_misc_control_s {
1024 unsigned long rejection_delay : 8; /* RW */ 1351 unsigned long rejection_delay:8; /* RW */
1025 unsigned long apic_mode : 1; /* RW */ 1352 unsigned long apic_mode:1; /* RW */
1026 unsigned long force_broadcast : 1; /* RW */ 1353 unsigned long force_broadcast:1; /* RW */
1027 unsigned long force_lock_nop : 1; /* RW */ 1354 unsigned long force_lock_nop:1; /* RW */
1028 unsigned long qpi_agent_presence_vector : 3; /* RW */ 1355 unsigned long qpi_agent_presence_vector:3; /* RW */
1029 unsigned long descriptor_fetch_mode : 1; /* RW */ 1356 unsigned long descriptor_fetch_mode:1; /* RW */
1030 unsigned long enable_intd_soft_ack_mode : 1; /* RW */ 1357 unsigned long enable_intd_soft_ack_mode:1; /* RW */
1031 unsigned long intd_soft_ack_timeout_period : 4; /* RW */ 1358 unsigned long intd_soft_ack_timeout_period:4; /* RW */
1032 unsigned long enable_dual_mapping_mode : 1; /* RW */ 1359 unsigned long enable_dual_mapping_mode:1; /* RW */
1033 unsigned long vga_io_port_decode_enable : 1; /* RW */ 1360 unsigned long vga_io_port_decode_enable:1; /* RW */
1034 unsigned long vga_io_port_16_bit_decode : 1; /* RW */ 1361 unsigned long vga_io_port_16_bit_decode:1; /* RW */
1035 unsigned long suppress_dest_registration : 1; /* RW */ 1362 unsigned long suppress_dest_registration:1; /* RW */
1036 unsigned long programmed_initial_priority : 3; /* RW */ 1363 unsigned long programmed_initial_priority:3; /* RW */
1037 unsigned long use_incoming_priority : 1; /* RW */ 1364 unsigned long use_incoming_priority:1; /* RW */
1038 unsigned long enable_programmed_initial_priority : 1; /* RW */ 1365 unsigned long enable_programmed_initial_priority:1;/* RW */
1039 unsigned long enable_automatic_apic_mode_selection : 1; /* RW */ 1366 unsigned long enable_automatic_apic_mode_selection:1;/* RW */
1040 unsigned long apic_mode_status : 1; /* RO */ 1367 unsigned long apic_mode_status:1; /* RO */
1041 unsigned long suppress_interrupts_to_self : 1; /* RW */ 1368 unsigned long suppress_interrupts_to_self:1; /* RW */
1042 unsigned long enable_lock_based_system_flush : 1; /* RW */ 1369 unsigned long enable_lock_based_system_flush:1;/* RW */
1043 unsigned long enable_extended_sb_status : 1; /* RW */ 1370 unsigned long enable_extended_sb_status:1; /* RW */
1044 unsigned long suppress_int_prio_udt_to_self : 1; /* RW */ 1371 unsigned long suppress_int_prio_udt_to_self:1;/* RW */
1045 unsigned long use_legacy_descriptor_formats : 1; /* RW */ 1372 unsigned long use_legacy_descriptor_formats:1;/* RW */
1046 unsigned long rsvd_36_47 : 12; /* */ 1373 unsigned long rsvd_36_47:12;
1047 unsigned long fun : 16; /* RW */ 1374 unsigned long fun:16; /* RW */
1048 } s2; 1375 } s2;
1049}; 1376};
1050 1377
1051/* ========================================================================= */ 1378/* ========================================================================= */
1052/* UVH_LB_BAU_SB_ACTIVATION_CONTROL */ 1379/* UVH_LB_BAU_SB_ACTIVATION_CONTROL */
1053/* ========================================================================= */ 1380/* ========================================================================= */
1054#define UVH_LB_BAU_SB_ACTIVATION_CONTROL 0x320020UL 1381#define UVH_LB_BAU_SB_ACTIVATION_CONTROL 0x320020UL
1055#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_32 0x9a8 1382#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_32 0x9a8
1056 1383
1057#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INDEX_SHFT 0 1384#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INDEX_SHFT 0
1058#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INDEX_MASK 0x000000000000003fUL 1385#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT 62
1059#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT 62 1386#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INIT_SHFT 63
1060#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_MASK 0x4000000000000000UL 1387#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INDEX_MASK 0x000000000000003fUL
1061#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INIT_SHFT 63 1388#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_MASK 0x4000000000000000UL
1062#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INIT_MASK 0x8000000000000000UL 1389#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INIT_MASK 0x8000000000000000UL
1063 1390
1064union uvh_lb_bau_sb_activation_control_u { 1391union uvh_lb_bau_sb_activation_control_u {
1065 unsigned long v; 1392 unsigned long v;
1066 struct uvh_lb_bau_sb_activation_control_s { 1393 struct uvh_lb_bau_sb_activation_control_s {
1067 unsigned long index : 6; /* RW */ 1394 unsigned long index:6; /* RW */
1068 unsigned long rsvd_6_61: 56; /* */ 1395 unsigned long rsvd_6_61:56;
1069 unsigned long push : 1; /* WP */ 1396 unsigned long push:1; /* WP */
1070 unsigned long init : 1; /* WP */ 1397 unsigned long init:1; /* WP */
1071 } s; 1398 } s;
1072}; 1399};
1073 1400
1074/* ========================================================================= */ 1401/* ========================================================================= */
1075/* UVH_LB_BAU_SB_ACTIVATION_STATUS_0 */ 1402/* UVH_LB_BAU_SB_ACTIVATION_STATUS_0 */
1076/* ========================================================================= */ 1403/* ========================================================================= */
1077#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0 0x320030UL 1404#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0 0x320030UL
1078#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_32 0x9b0 1405#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_32 0x9b0
1079 1406
1080#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_SHFT 0 1407#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_SHFT 0
1081#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_MASK 0xffffffffffffffffUL 1408#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_MASK 0xffffffffffffffffUL
1082 1409
1083union uvh_lb_bau_sb_activation_status_0_u { 1410union uvh_lb_bau_sb_activation_status_0_u {
1084 unsigned long v; 1411 unsigned long v;
1085 struct uvh_lb_bau_sb_activation_status_0_s { 1412 struct uvh_lb_bau_sb_activation_status_0_s {
1086 unsigned long status : 64; /* RW */ 1413 unsigned long status:64; /* RW */
1087 } s; 1414 } s;
1088}; 1415};
1089 1416
1090/* ========================================================================= */ 1417/* ========================================================================= */
1091/* UVH_LB_BAU_SB_ACTIVATION_STATUS_1 */ 1418/* UVH_LB_BAU_SB_ACTIVATION_STATUS_1 */
1092/* ========================================================================= */ 1419/* ========================================================================= */
1093#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1 0x320040UL 1420#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1 0x320040UL
1094#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_32 0x9b8 1421#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_32 0x9b8
1095 1422
1096#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_SHFT 0 1423#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_SHFT 0
1097#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_MASK 0xffffffffffffffffUL 1424#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_MASK 0xffffffffffffffffUL
1098 1425
1099union uvh_lb_bau_sb_activation_status_1_u { 1426union uvh_lb_bau_sb_activation_status_1_u {
1100 unsigned long v; 1427 unsigned long v;
1101 struct uvh_lb_bau_sb_activation_status_1_s { 1428 struct uvh_lb_bau_sb_activation_status_1_s {
1102 unsigned long status : 64; /* RW */ 1429 unsigned long status:64; /* RW */
1103 } s; 1430 } s;
1104}; 1431};
1105 1432
1106/* ========================================================================= */ 1433/* ========================================================================= */
1107/* UVH_LB_BAU_SB_DESCRIPTOR_BASE */ 1434/* UVH_LB_BAU_SB_DESCRIPTOR_BASE */
1108/* ========================================================================= */ 1435/* ========================================================================= */
1109#define UVH_LB_BAU_SB_DESCRIPTOR_BASE 0x320010UL 1436#define UVH_LB_BAU_SB_DESCRIPTOR_BASE 0x320010UL
1110#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_32 0x9a0 1437#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_32 0x9a0
1111 1438
1112#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_SHFT 12 1439#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_SHFT 12
1113#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK 0x000007fffffff000UL 1440#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT 49
1114#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT 49 1441#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK 0x000007fffffff000UL
1115#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK 0x7ffe000000000000UL 1442#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK 0x7ffe000000000000UL
1116 1443
1117union uvh_lb_bau_sb_descriptor_base_u { 1444union uvh_lb_bau_sb_descriptor_base_u {
1118 unsigned long v; 1445 unsigned long v;
1119 struct uvh_lb_bau_sb_descriptor_base_s { 1446 struct uvh_lb_bau_sb_descriptor_base_s {
1120 unsigned long rsvd_0_11 : 12; /* */ 1447 unsigned long rsvd_0_11:12;
1121 unsigned long page_address : 31; /* RW */ 1448 unsigned long page_address:31; /* RW */
1122 unsigned long rsvd_43_48 : 6; /* */ 1449 unsigned long rsvd_43_48:6;
1123 unsigned long node_id : 14; /* RW */ 1450 unsigned long node_id:14; /* RW */
1124 unsigned long rsvd_63 : 1; /* */ 1451 unsigned long rsvd_63:1;
1125 } s; 1452 } s;
1126}; 1453};
1127 1454
1128/* ========================================================================= */ 1455/* ========================================================================= */
1129/* UVH_NODE_ID */ 1456/* UVH_NODE_ID */
1130/* ========================================================================= */ 1457/* ========================================================================= */
1131#define UVH_NODE_ID 0x0UL 1458#define UVH_NODE_ID 0x0UL
1132 1459
1133#define UVH_NODE_ID_FORCE1_SHFT 0 1460#define UVH_NODE_ID_FORCE1_SHFT 0
1134#define UVH_NODE_ID_FORCE1_MASK 0x0000000000000001UL 1461#define UVH_NODE_ID_MANUFACTURER_SHFT 1
1135#define UVH_NODE_ID_MANUFACTURER_SHFT 1 1462#define UVH_NODE_ID_PART_NUMBER_SHFT 12
1136#define UVH_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL 1463#define UVH_NODE_ID_REVISION_SHFT 28
1137#define UVH_NODE_ID_PART_NUMBER_SHFT 12 1464#define UVH_NODE_ID_NODE_ID_SHFT 32
1138#define UVH_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL 1465#define UVH_NODE_ID_FORCE1_MASK 0x0000000000000001UL
1139#define UVH_NODE_ID_REVISION_SHFT 28 1466#define UVH_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL
1140#define UVH_NODE_ID_REVISION_MASK 0x00000000f0000000UL 1467#define UVH_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL
1141#define UVH_NODE_ID_NODE_ID_SHFT 32 1468#define UVH_NODE_ID_REVISION_MASK 0x00000000f0000000UL
1142#define UVH_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL 1469#define UVH_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL
1143 1470
1144#define UV1H_NODE_ID_FORCE1_SHFT 0 1471#define UV1H_NODE_ID_FORCE1_SHFT 0
1145#define UV1H_NODE_ID_FORCE1_MASK 0x0000000000000001UL 1472#define UV1H_NODE_ID_MANUFACTURER_SHFT 1
1146#define UV1H_NODE_ID_MANUFACTURER_SHFT 1 1473#define UV1H_NODE_ID_PART_NUMBER_SHFT 12
1147#define UV1H_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL 1474#define UV1H_NODE_ID_REVISION_SHFT 28
1148#define UV1H_NODE_ID_PART_NUMBER_SHFT 12 1475#define UV1H_NODE_ID_NODE_ID_SHFT 32
1149#define UV1H_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL 1476#define UV1H_NODE_ID_NODES_PER_BIT_SHFT 48
1150#define UV1H_NODE_ID_REVISION_SHFT 28 1477#define UV1H_NODE_ID_NI_PORT_SHFT 56
1151#define UV1H_NODE_ID_REVISION_MASK 0x00000000f0000000UL 1478#define UV1H_NODE_ID_FORCE1_MASK 0x0000000000000001UL
1152#define UV1H_NODE_ID_NODE_ID_SHFT 32 1479#define UV1H_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL
1153#define UV1H_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL 1480#define UV1H_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL
1154#define UV1H_NODE_ID_NODES_PER_BIT_SHFT 48 1481#define UV1H_NODE_ID_REVISION_MASK 0x00000000f0000000UL
1155#define UV1H_NODE_ID_NODES_PER_BIT_MASK 0x007f000000000000UL 1482#define UV1H_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL
1156#define UV1H_NODE_ID_NI_PORT_SHFT 56 1483#define UV1H_NODE_ID_NODES_PER_BIT_MASK 0x007f000000000000UL
1157#define UV1H_NODE_ID_NI_PORT_MASK 0x0f00000000000000UL 1484#define UV1H_NODE_ID_NI_PORT_MASK 0x0f00000000000000UL
1158 1485
1159#define UV2H_NODE_ID_FORCE1_SHFT 0 1486#define UV2H_NODE_ID_FORCE1_SHFT 0
1160#define UV2H_NODE_ID_FORCE1_MASK 0x0000000000000001UL 1487#define UV2H_NODE_ID_MANUFACTURER_SHFT 1
1161#define UV2H_NODE_ID_MANUFACTURER_SHFT 1 1488#define UV2H_NODE_ID_PART_NUMBER_SHFT 12
1162#define UV2H_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL 1489#define UV2H_NODE_ID_REVISION_SHFT 28
1163#define UV2H_NODE_ID_PART_NUMBER_SHFT 12 1490#define UV2H_NODE_ID_NODE_ID_SHFT 32
1164#define UV2H_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL 1491#define UV2H_NODE_ID_NODES_PER_BIT_SHFT 50
1165#define UV2H_NODE_ID_REVISION_SHFT 28 1492#define UV2H_NODE_ID_NI_PORT_SHFT 57
1166#define UV2H_NODE_ID_REVISION_MASK 0x00000000f0000000UL 1493#define UV2H_NODE_ID_FORCE1_MASK 0x0000000000000001UL
1167#define UV2H_NODE_ID_NODE_ID_SHFT 32 1494#define UV2H_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL
1168#define UV2H_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL 1495#define UV2H_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL
1169#define UV2H_NODE_ID_NODES_PER_BIT_SHFT 50 1496#define UV2H_NODE_ID_REVISION_MASK 0x00000000f0000000UL
1170#define UV2H_NODE_ID_NODES_PER_BIT_MASK 0x01fc000000000000UL 1497#define UV2H_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL
1171#define UV2H_NODE_ID_NI_PORT_SHFT 57 1498#define UV2H_NODE_ID_NODES_PER_BIT_MASK 0x01fc000000000000UL
1172#define UV2H_NODE_ID_NI_PORT_MASK 0x3e00000000000000UL 1499#define UV2H_NODE_ID_NI_PORT_MASK 0x3e00000000000000UL
1173 1500
1174union uvh_node_id_u { 1501union uvh_node_id_u {
1175 unsigned long v; 1502 unsigned long v;
1176 struct uvh_node_id_s { 1503 struct uvh_node_id_s {
1177 unsigned long force1 : 1; /* RO */ 1504 unsigned long force1:1; /* RO */
1178 unsigned long manufacturer : 11; /* RO */ 1505 unsigned long manufacturer:11; /* RO */
1179 unsigned long part_number : 16; /* RO */ 1506 unsigned long part_number:16; /* RO */
1180 unsigned long revision : 4; /* RO */ 1507 unsigned long revision:4; /* RO */
1181 unsigned long node_id : 15; /* RW */ 1508 unsigned long node_id:15; /* RW */
1182 unsigned long rsvd_47_63 : 17; 1509 unsigned long rsvd_47_63:17;
1183 } s; 1510 } s;
1184 struct uv1h_node_id_s { 1511 struct uv1h_node_id_s {
1185 unsigned long force1 : 1; /* RO */ 1512 unsigned long force1:1; /* RO */
1186 unsigned long manufacturer : 11; /* RO */ 1513 unsigned long manufacturer:11; /* RO */
1187 unsigned long part_number : 16; /* RO */ 1514 unsigned long part_number:16; /* RO */
1188 unsigned long revision : 4; /* RO */ 1515 unsigned long revision:4; /* RO */
1189 unsigned long node_id : 15; /* RW */ 1516 unsigned long node_id:15; /* RW */
1190 unsigned long rsvd_47 : 1; /* */ 1517 unsigned long rsvd_47:1;
1191 unsigned long nodes_per_bit : 7; /* RW */ 1518 unsigned long nodes_per_bit:7; /* RW */
1192 unsigned long rsvd_55 : 1; /* */ 1519 unsigned long rsvd_55:1;
1193 unsigned long ni_port : 4; /* RO */ 1520 unsigned long ni_port:4; /* RO */
1194 unsigned long rsvd_60_63 : 4; /* */ 1521 unsigned long rsvd_60_63:4;
1195 } s1; 1522 } s1;
1196 struct uv2h_node_id_s { 1523 struct uv2h_node_id_s {
1197 unsigned long force1 : 1; /* RO */ 1524 unsigned long force1:1; /* RO */
1198 unsigned long manufacturer : 11; /* RO */ 1525 unsigned long manufacturer:11; /* RO */
1199 unsigned long part_number : 16; /* RO */ 1526 unsigned long part_number:16; /* RO */
1200 unsigned long revision : 4; /* RO */ 1527 unsigned long revision:4; /* RO */
1201 unsigned long node_id : 15; /* RW */ 1528 unsigned long node_id:15; /* RW */
1202 unsigned long rsvd_47_49 : 3; /* */ 1529 unsigned long rsvd_47_49:3;
1203 unsigned long nodes_per_bit : 7; /* RO */ 1530 unsigned long nodes_per_bit:7; /* RO */
1204 unsigned long ni_port : 5; /* RO */ 1531 unsigned long ni_port:5; /* RO */
1205 unsigned long rsvd_62_63 : 2; /* */ 1532 unsigned long rsvd_62_63:2;
1206 } s2; 1533 } s2;
1207}; 1534};
1208 1535
1209/* ========================================================================= */ 1536/* ========================================================================= */
1210/* UVH_NODE_PRESENT_TABLE */ 1537/* UVH_NODE_PRESENT_TABLE */
1211/* ========================================================================= */ 1538/* ========================================================================= */
1212#define UVH_NODE_PRESENT_TABLE 0x1400UL 1539#define UVH_NODE_PRESENT_TABLE 0x1400UL
1213#define UVH_NODE_PRESENT_TABLE_DEPTH 16 1540#define UVH_NODE_PRESENT_TABLE_DEPTH 16
1214 1541
1215#define UVH_NODE_PRESENT_TABLE_NODES_SHFT 0 1542#define UVH_NODE_PRESENT_TABLE_NODES_SHFT 0
1216#define UVH_NODE_PRESENT_TABLE_NODES_MASK 0xffffffffffffffffUL 1543#define UVH_NODE_PRESENT_TABLE_NODES_MASK 0xffffffffffffffffUL
1217 1544
1218union uvh_node_present_table_u { 1545union uvh_node_present_table_u {
1219 unsigned long v; 1546 unsigned long v;
1220 struct uvh_node_present_table_s { 1547 struct uvh_node_present_table_s {
1221 unsigned long nodes : 64; /* RW */ 1548 unsigned long nodes:64; /* RW */
1222 } s; 1549 } s;
1223}; 1550};
1224 1551
1225/* ========================================================================= */ 1552/* ========================================================================= */
1226/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR */ 1553/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR */
1227/* ========================================================================= */ 1554/* ========================================================================= */
1228#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR 0x16000c8UL 1555#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR 0x16000c8UL
1229 1556
1230#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_SHFT 24 1557#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_SHFT 24
1231#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_MASK 0x00000000ff000000UL
1232#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_SHFT 48 1558#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_SHFT 48
1233#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_MASK 0x001f000000000000UL
1234#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_SHFT 63 1559#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_SHFT 63
1560#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_MASK 0x00000000ff000000UL
1561#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_MASK 0x001f000000000000UL
1235#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_MASK 0x8000000000000000UL 1562#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_MASK 0x8000000000000000UL
1236 1563
1237union uvh_rh_gam_alias210_overlay_config_0_mmr_u { 1564union uvh_rh_gam_alias210_overlay_config_0_mmr_u {
1238 unsigned long v; 1565 unsigned long v;
1239 struct uvh_rh_gam_alias210_overlay_config_0_mmr_s { 1566 struct uvh_rh_gam_alias210_overlay_config_0_mmr_s {
1240 unsigned long rsvd_0_23: 24; /* */ 1567 unsigned long rsvd_0_23:24;
1241 unsigned long base : 8; /* RW */ 1568 unsigned long base:8; /* RW */
1242 unsigned long rsvd_32_47: 16; /* */ 1569 unsigned long rsvd_32_47:16;
1243 unsigned long m_alias : 5; /* RW */ 1570 unsigned long m_alias:5; /* RW */
1244 unsigned long rsvd_53_62: 10; /* */ 1571 unsigned long rsvd_53_62:10;
1245 unsigned long enable : 1; /* RW */ 1572 unsigned long enable:1; /* RW */
1246 } s; 1573 } s;
1247}; 1574};
1248 1575
1249/* ========================================================================= */ 1576/* ========================================================================= */
1250/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR */ 1577/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR */
1251/* ========================================================================= */ 1578/* ========================================================================= */
1252#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR 0x16000d8UL 1579#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR 0x16000d8UL
1253 1580
1254#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_SHFT 24 1581#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_SHFT 24
1255#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_MASK 0x00000000ff000000UL
1256#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_SHFT 48 1582#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_SHFT 48
1257#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_MASK 0x001f000000000000UL
1258#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_SHFT 63 1583#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_SHFT 63
1584#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_MASK 0x00000000ff000000UL
1585#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_MASK 0x001f000000000000UL
1259#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_MASK 0x8000000000000000UL 1586#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_MASK 0x8000000000000000UL
1260 1587
1261union uvh_rh_gam_alias210_overlay_config_1_mmr_u { 1588union uvh_rh_gam_alias210_overlay_config_1_mmr_u {
1262 unsigned long v; 1589 unsigned long v;
1263 struct uvh_rh_gam_alias210_overlay_config_1_mmr_s { 1590 struct uvh_rh_gam_alias210_overlay_config_1_mmr_s {
1264 unsigned long rsvd_0_23: 24; /* */ 1591 unsigned long rsvd_0_23:24;
1265 unsigned long base : 8; /* RW */ 1592 unsigned long base:8; /* RW */
1266 unsigned long rsvd_32_47: 16; /* */ 1593 unsigned long rsvd_32_47:16;
1267 unsigned long m_alias : 5; /* RW */ 1594 unsigned long m_alias:5; /* RW */
1268 unsigned long rsvd_53_62: 10; /* */ 1595 unsigned long rsvd_53_62:10;
1269 unsigned long enable : 1; /* RW */ 1596 unsigned long enable:1; /* RW */
1270 } s; 1597 } s;
1271}; 1598};
1272 1599
1273/* ========================================================================= */ 1600/* ========================================================================= */
1274/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR */ 1601/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR */
1275/* ========================================================================= */ 1602/* ========================================================================= */
1276#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR 0x16000e8UL 1603#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR 0x16000e8UL
1277 1604
1278#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_SHFT 24 1605#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_SHFT 24
1279#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_MASK 0x00000000ff000000UL
1280#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_SHFT 48 1606#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_SHFT 48
1281#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_MASK 0x001f000000000000UL
1282#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_SHFT 63 1607#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_SHFT 63
1608#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_MASK 0x00000000ff000000UL
1609#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_MASK 0x001f000000000000UL
1283#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_MASK 0x8000000000000000UL 1610#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_MASK 0x8000000000000000UL
1284 1611
1285union uvh_rh_gam_alias210_overlay_config_2_mmr_u { 1612union uvh_rh_gam_alias210_overlay_config_2_mmr_u {
1286 unsigned long v; 1613 unsigned long v;
1287 struct uvh_rh_gam_alias210_overlay_config_2_mmr_s { 1614 struct uvh_rh_gam_alias210_overlay_config_2_mmr_s {
1288 unsigned long rsvd_0_23: 24; /* */ 1615 unsigned long rsvd_0_23:24;
1289 unsigned long base : 8; /* RW */ 1616 unsigned long base:8; /* RW */
1290 unsigned long rsvd_32_47: 16; /* */ 1617 unsigned long rsvd_32_47:16;
1291 unsigned long m_alias : 5; /* RW */ 1618 unsigned long m_alias:5; /* RW */
1292 unsigned long rsvd_53_62: 10; /* */ 1619 unsigned long rsvd_53_62:10;
1293 unsigned long enable : 1; /* RW */ 1620 unsigned long enable:1; /* RW */
1294 } s; 1621 } s;
1295}; 1622};
1296 1623
1297/* ========================================================================= */ 1624/* ========================================================================= */
1298/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR */ 1625/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR */
1299/* ========================================================================= */ 1626/* ========================================================================= */
1300#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x16000d0UL 1627#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x16000d0UL
1301 1628
1302#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 24 1629#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 24
1303#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_MASK 0x00003fffff000000UL 1630#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_MASK 0x00003fffff000000UL
1304 1631
1305union uvh_rh_gam_alias210_redirect_config_0_mmr_u { 1632union uvh_rh_gam_alias210_redirect_config_0_mmr_u {
1306 unsigned long v; 1633 unsigned long v;
1307 struct uvh_rh_gam_alias210_redirect_config_0_mmr_s { 1634 struct uvh_rh_gam_alias210_redirect_config_0_mmr_s {
1308 unsigned long rsvd_0_23 : 24; /* */ 1635 unsigned long rsvd_0_23:24;
1309 unsigned long dest_base : 22; /* RW */ 1636 unsigned long dest_base:22; /* RW */
1310 unsigned long rsvd_46_63: 18; /* */ 1637 unsigned long rsvd_46_63:18;
1311 } s; 1638 } s;
1312}; 1639};
1313 1640
1314/* ========================================================================= */ 1641/* ========================================================================= */
1315/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR */ 1642/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR */
1316/* ========================================================================= */ 1643/* ========================================================================= */
1317#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR 0x16000e0UL 1644#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR 0x16000e0UL
1318 1645
1319#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_SHFT 24 1646#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_SHFT 24
1320#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_MASK 0x00003fffff000000UL 1647#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_MASK 0x00003fffff000000UL
1321 1648
1322union uvh_rh_gam_alias210_redirect_config_1_mmr_u { 1649union uvh_rh_gam_alias210_redirect_config_1_mmr_u {
1323 unsigned long v; 1650 unsigned long v;
1324 struct uvh_rh_gam_alias210_redirect_config_1_mmr_s { 1651 struct uvh_rh_gam_alias210_redirect_config_1_mmr_s {
1325 unsigned long rsvd_0_23 : 24; /* */ 1652 unsigned long rsvd_0_23:24;
1326 unsigned long dest_base : 22; /* RW */ 1653 unsigned long dest_base:22; /* RW */
1327 unsigned long rsvd_46_63: 18; /* */ 1654 unsigned long rsvd_46_63:18;
1328 } s; 1655 } s;
1329}; 1656};
1330 1657
1331/* ========================================================================= */ 1658/* ========================================================================= */
1332/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR */ 1659/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR */
1333/* ========================================================================= */ 1660/* ========================================================================= */
1334#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR 0x16000f0UL 1661#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR 0x16000f0UL
1335 1662
1336#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_SHFT 24 1663#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_SHFT 24
1337#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_MASK 0x00003fffff000000UL 1664#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_MASK 0x00003fffff000000UL
1338 1665
1339union uvh_rh_gam_alias210_redirect_config_2_mmr_u { 1666union uvh_rh_gam_alias210_redirect_config_2_mmr_u {
1340 unsigned long v; 1667 unsigned long v;
1341 struct uvh_rh_gam_alias210_redirect_config_2_mmr_s { 1668 struct uvh_rh_gam_alias210_redirect_config_2_mmr_s {
1342 unsigned long rsvd_0_23 : 24; /* */ 1669 unsigned long rsvd_0_23:24;
1343 unsigned long dest_base : 22; /* RW */ 1670 unsigned long dest_base:22; /* RW */
1344 unsigned long rsvd_46_63: 18; /* */ 1671 unsigned long rsvd_46_63:18;
1345 } s; 1672 } s;
1346}; 1673};
1347 1674
1348/* ========================================================================= */ 1675/* ========================================================================= */
1349/* UVH_RH_GAM_CONFIG_MMR */ 1676/* UVH_RH_GAM_CONFIG_MMR */
1350/* ========================================================================= */ 1677/* ========================================================================= */
1351#define UVH_RH_GAM_CONFIG_MMR 0x1600000UL 1678#define UVH_RH_GAM_CONFIG_MMR 0x1600000UL
1352 1679
1353#define UVH_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0 1680#define UVH_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0
1354#define UVH_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL 1681#define UVH_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
1355#define UVH_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6 1682#define UVH_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL
1356#define UVH_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL 1683#define UVH_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
1357 1684
1358#define UV1H_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0 1685#define UV1H_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0
1359#define UV1H_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL 1686#define UV1H_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
1360#define UV1H_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6 1687#define UV1H_RH_GAM_CONFIG_MMR_MMIOL_CFG_SHFT 12
1361#define UV1H_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL 1688#define UV1H_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL
1362#define UV1H_RH_GAM_CONFIG_MMR_MMIOL_CFG_SHFT 12 1689#define UV1H_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
1363#define UV1H_RH_GAM_CONFIG_MMR_MMIOL_CFG_MASK 0x0000000000001000UL 1690#define UV1H_RH_GAM_CONFIG_MMR_MMIOL_CFG_MASK 0x0000000000001000UL
1364 1691
1365#define UV2H_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0 1692#define UV2H_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0
1366#define UV2H_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL 1693#define UV2H_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
1367#define UV2H_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6 1694#define UV2H_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL
1368#define UV2H_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL 1695#define UV2H_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
1369 1696
1370union uvh_rh_gam_config_mmr_u { 1697union uvh_rh_gam_config_mmr_u {
1371 unsigned long v; 1698 unsigned long v;
1372 struct uvh_rh_gam_config_mmr_s { 1699 struct uvh_rh_gam_config_mmr_s {
1373 unsigned long m_skt : 6; /* RW */ 1700 unsigned long m_skt:6; /* RW */
1374 unsigned long n_skt : 4; /* RW */ 1701 unsigned long n_skt:4; /* RW */
1375 unsigned long rsvd_10_63 : 54; 1702 unsigned long rsvd_10_63:54;
1376 } s; 1703 } s;
1377 struct uv1h_rh_gam_config_mmr_s { 1704 struct uv1h_rh_gam_config_mmr_s {
1378 unsigned long m_skt : 6; /* RW */ 1705 unsigned long m_skt:6; /* RW */
1379 unsigned long n_skt : 4; /* RW */ 1706 unsigned long n_skt:4; /* RW */
1380 unsigned long rsvd_10_11: 2; /* */ 1707 unsigned long rsvd_10_11:2;
1381 unsigned long mmiol_cfg : 1; /* RW */ 1708 unsigned long mmiol_cfg:1; /* RW */
1382 unsigned long rsvd_13_63: 51; /* */ 1709 unsigned long rsvd_13_63:51;
1383 } s1; 1710 } s1;
1384 struct uv2h_rh_gam_config_mmr_s { 1711 struct uv2h_rh_gam_config_mmr_s {
1385 unsigned long m_skt : 6; /* RW */ 1712 unsigned long m_skt:6; /* RW */
1386 unsigned long n_skt : 4; /* RW */ 1713 unsigned long n_skt:4; /* RW */
1387 unsigned long rsvd_10_63: 54; /* */ 1714 unsigned long rsvd_10_63:54;
1388 } s2; 1715 } s2;
1389}; 1716};
1390 1717
1391/* ========================================================================= */ 1718/* ========================================================================= */
1392/* UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR */ 1719/* UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR */
1393/* ========================================================================= */ 1720/* ========================================================================= */
1394#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL 1721#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
1395 1722
1396#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28 1723#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
1397#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL 1724#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL
1398 1725
1399#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28 1726#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
1400#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL 1727#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_SHFT 48
1401#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_SHFT 48 1728#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
1402#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_MASK 0x0001000000000000UL 1729#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
1403#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52 1730#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL
1404#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL 1731#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_MASK 0x0001000000000000UL
1405#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63 1732#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
1406#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL 1733#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
1407 1734
1408#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28 1735#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
1409#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL 1736#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
1410#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52 1737#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
1411#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL 1738#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL
1412#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63 1739#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
1413#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL 1740#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
1414 1741
1415union uvh_rh_gam_gru_overlay_config_mmr_u { 1742union uvh_rh_gam_gru_overlay_config_mmr_u {
1416 unsigned long v; 1743 unsigned long v;
1417 struct uvh_rh_gam_gru_overlay_config_mmr_s { 1744 struct uvh_rh_gam_gru_overlay_config_mmr_s {
1418 unsigned long rsvd_0_27: 28; /* */ 1745 unsigned long rsvd_0_27:28;
1419 unsigned long base : 18; /* RW */ 1746 unsigned long base:18; /* RW */
1420 unsigned long rsvd_46_62 : 17; 1747 unsigned long rsvd_46_62:17;
1421 unsigned long enable : 1; /* RW */ 1748 unsigned long enable:1; /* RW */
1422 } s; 1749 } s;
1423 struct uv1h_rh_gam_gru_overlay_config_mmr_s { 1750 struct uv1h_rh_gam_gru_overlay_config_mmr_s {
1424 unsigned long rsvd_0_27: 28; /* */ 1751 unsigned long rsvd_0_27:28;
1425 unsigned long base : 18; /* RW */ 1752 unsigned long base:18; /* RW */
1426 unsigned long rsvd_46_47: 2; /* */ 1753 unsigned long rsvd_46_47:2;
1427 unsigned long gr4 : 1; /* RW */ 1754 unsigned long gr4:1; /* RW */
1428 unsigned long rsvd_49_51: 3; /* */ 1755 unsigned long rsvd_49_51:3;
1429 unsigned long n_gru : 4; /* RW */ 1756 unsigned long n_gru:4; /* RW */
1430 unsigned long rsvd_56_62: 7; /* */ 1757 unsigned long rsvd_56_62:7;
1431 unsigned long enable : 1; /* RW */ 1758 unsigned long enable:1; /* RW */
1432 } s1; 1759 } s1;
1433 struct uv2h_rh_gam_gru_overlay_config_mmr_s { 1760 struct uv2h_rh_gam_gru_overlay_config_mmr_s {
1434 unsigned long rsvd_0_27: 28; /* */ 1761 unsigned long rsvd_0_27:28;
1435 unsigned long base : 18; /* RW */ 1762 unsigned long base:18; /* RW */
1436 unsigned long rsvd_46_51: 6; /* */ 1763 unsigned long rsvd_46_51:6;
1437 unsigned long n_gru : 4; /* RW */ 1764 unsigned long n_gru:4; /* RW */
1438 unsigned long rsvd_56_62: 7; /* */ 1765 unsigned long rsvd_56_62:7;
1439 unsigned long enable : 1; /* RW */ 1766 unsigned long enable:1; /* RW */
1440 } s2; 1767 } s2;
1441}; 1768};
1442 1769
1443/* ========================================================================= */ 1770/* ========================================================================= */
1444/* UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR */ 1771/* UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR */
1445/* ========================================================================= */ 1772/* ========================================================================= */
1446#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR 0x1600030UL 1773#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR 0x1600030UL
1447 1774
1448#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT 30 1775#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT 30
1449#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003fffc0000000UL 1776#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_SHFT 46
1450#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_SHFT 46 1777#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_N_IO_SHFT 52
1451#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_MASK 0x000fc00000000000UL
1452#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_N_IO_SHFT 52
1453#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_N_IO_MASK 0x00f0000000000000UL
1454#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63 1778#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
1779#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003fffc0000000UL
1780#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_MASK 0x000fc00000000000UL
1781#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_N_IO_MASK 0x00f0000000000000UL
1455#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL 1782#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
1456 1783
1457#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT 27 1784#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT 27
1458#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff8000000UL 1785#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_SHFT 46
1459#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_SHFT 46 1786#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_N_IO_SHFT 52
1460#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_MASK 0x000fc00000000000UL
1461#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_N_IO_SHFT 52
1462#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_N_IO_MASK 0x00f0000000000000UL
1463#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63 1787#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
1788#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff8000000UL
1789#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_MASK 0x000fc00000000000UL
1790#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_N_IO_MASK 0x00f0000000000000UL
1464#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL 1791#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
1465 1792
1466union uvh_rh_gam_mmioh_overlay_config_mmr_u { 1793union uvh_rh_gam_mmioh_overlay_config_mmr_u {
1467 unsigned long v; 1794 unsigned long v;
1468 struct uv1h_rh_gam_mmioh_overlay_config_mmr_s { 1795 struct uv1h_rh_gam_mmioh_overlay_config_mmr_s {
1469 unsigned long rsvd_0_29: 30; /* */ 1796 unsigned long rsvd_0_29:30;
1470 unsigned long base : 16; /* RW */ 1797 unsigned long base:16; /* RW */
1471 unsigned long m_io : 6; /* RW */ 1798 unsigned long m_io:6; /* RW */
1472 unsigned long n_io : 4; /* RW */ 1799 unsigned long n_io:4; /* RW */
1473 unsigned long rsvd_56_62: 7; /* */ 1800 unsigned long rsvd_56_62:7;
1474 unsigned long enable : 1; /* RW */ 1801 unsigned long enable:1; /* RW */
1475 } s1; 1802 } s1;
1476 struct uv2h_rh_gam_mmioh_overlay_config_mmr_s { 1803 struct uv2h_rh_gam_mmioh_overlay_config_mmr_s {
1477 unsigned long rsvd_0_26: 27; /* */ 1804 unsigned long rsvd_0_26:27;
1478 unsigned long base : 19; /* RW */ 1805 unsigned long base:19; /* RW */
1479 unsigned long m_io : 6; /* RW */ 1806 unsigned long m_io:6; /* RW */
1480 unsigned long n_io : 4; /* RW */ 1807 unsigned long n_io:4; /* RW */
1481 unsigned long rsvd_56_62: 7; /* */ 1808 unsigned long rsvd_56_62:7;
1482 unsigned long enable : 1; /* RW */ 1809 unsigned long enable:1; /* RW */
1483 } s2; 1810 } s2;
1484}; 1811};
1485 1812
1486/* ========================================================================= */ 1813/* ========================================================================= */
1487/* UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR */ 1814/* UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR */
1488/* ========================================================================= */ 1815/* ========================================================================= */
1489#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL 1816#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL
1490 1817
1491#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26 1818#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
1492#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL 1819#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
1493 1820
1494#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26 1821#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
1495#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
1496#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_SHFT 46 1822#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_SHFT 46
1823#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
1824#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
1497#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_MASK 0x0000400000000000UL 1825#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_MASK 0x0000400000000000UL
1498#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63 1826#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
1499#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
1500 1827
1501#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26 1828#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
1502#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL 1829#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
1503#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63 1830#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
1504#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL 1831#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
1505 1832
1506union uvh_rh_gam_mmr_overlay_config_mmr_u { 1833union uvh_rh_gam_mmr_overlay_config_mmr_u {
1507 unsigned long v; 1834 unsigned long v;
1508 struct uvh_rh_gam_mmr_overlay_config_mmr_s { 1835 struct uvh_rh_gam_mmr_overlay_config_mmr_s {
1509 unsigned long rsvd_0_25: 26; /* */ 1836 unsigned long rsvd_0_25:26;
1510 unsigned long base : 20; /* RW */ 1837 unsigned long base:20; /* RW */
1511 unsigned long rsvd_46_62 : 17; 1838 unsigned long rsvd_46_62:17;
1512 unsigned long enable : 1; /* RW */ 1839 unsigned long enable:1; /* RW */
1513 } s; 1840 } s;
1514 struct uv1h_rh_gam_mmr_overlay_config_mmr_s { 1841 struct uv1h_rh_gam_mmr_overlay_config_mmr_s {
1515 unsigned long rsvd_0_25: 26; /* */ 1842 unsigned long rsvd_0_25:26;
1516 unsigned long base : 20; /* RW */ 1843 unsigned long base:20; /* RW */
1517 unsigned long dual_hub : 1; /* RW */ 1844 unsigned long dual_hub:1; /* RW */
1518 unsigned long rsvd_47_62: 16; /* */ 1845 unsigned long rsvd_47_62:16;
1519 unsigned long enable : 1; /* RW */ 1846 unsigned long enable:1; /* RW */
1520 } s1; 1847 } s1;
1521 struct uv2h_rh_gam_mmr_overlay_config_mmr_s { 1848 struct uv2h_rh_gam_mmr_overlay_config_mmr_s {
1522 unsigned long rsvd_0_25: 26; /* */ 1849 unsigned long rsvd_0_25:26;
1523 unsigned long base : 20; /* RW */ 1850 unsigned long base:20; /* RW */
1524 unsigned long rsvd_46_62: 17; /* */ 1851 unsigned long rsvd_46_62:17;
1525 unsigned long enable : 1; /* RW */ 1852 unsigned long enable:1; /* RW */
1526 } s2; 1853 } s2;
1527}; 1854};
1528 1855
1529/* ========================================================================= */ 1856/* ========================================================================= */
1530/* UVH_RTC */ 1857/* UVH_RTC */
1531/* ========================================================================= */ 1858/* ========================================================================= */
1532#define UVH_RTC 0x340000UL 1859#define UVH_RTC 0x340000UL
1533 1860
1534#define UVH_RTC_REAL_TIME_CLOCK_SHFT 0 1861#define UVH_RTC_REAL_TIME_CLOCK_SHFT 0
1535#define UVH_RTC_REAL_TIME_CLOCK_MASK 0x00ffffffffffffffUL 1862#define UVH_RTC_REAL_TIME_CLOCK_MASK 0x00ffffffffffffffUL
1536 1863
1537union uvh_rtc_u { 1864union uvh_rtc_u {
1538 unsigned long v; 1865 unsigned long v;
1539 struct uvh_rtc_s { 1866 struct uvh_rtc_s {
1540 unsigned long real_time_clock : 56; /* RW */ 1867 unsigned long real_time_clock:56; /* RW */
1541 unsigned long rsvd_56_63 : 8; /* */ 1868 unsigned long rsvd_56_63:8;
1542 } s; 1869 } s;
1543}; 1870};
1544 1871
1545/* ========================================================================= */ 1872/* ========================================================================= */
1546/* UVH_RTC1_INT_CONFIG */ 1873/* UVH_RTC1_INT_CONFIG */
1547/* ========================================================================= */ 1874/* ========================================================================= */
1548#define UVH_RTC1_INT_CONFIG 0x615c0UL 1875#define UVH_RTC1_INT_CONFIG 0x615c0UL
1549 1876
1550#define UVH_RTC1_INT_CONFIG_VECTOR_SHFT 0 1877#define UVH_RTC1_INT_CONFIG_VECTOR_SHFT 0
1551#define UVH_RTC1_INT_CONFIG_VECTOR_MASK 0x00000000000000ffUL 1878#define UVH_RTC1_INT_CONFIG_DM_SHFT 8
1552#define UVH_RTC1_INT_CONFIG_DM_SHFT 8 1879#define UVH_RTC1_INT_CONFIG_DESTMODE_SHFT 11
1553#define UVH_RTC1_INT_CONFIG_DM_MASK 0x0000000000000700UL 1880#define UVH_RTC1_INT_CONFIG_STATUS_SHFT 12
1554#define UVH_RTC1_INT_CONFIG_DESTMODE_SHFT 11 1881#define UVH_RTC1_INT_CONFIG_P_SHFT 13
1555#define UVH_RTC1_INT_CONFIG_DESTMODE_MASK 0x0000000000000800UL 1882#define UVH_RTC1_INT_CONFIG_T_SHFT 15
1556#define UVH_RTC1_INT_CONFIG_STATUS_SHFT 12 1883#define UVH_RTC1_INT_CONFIG_M_SHFT 16
1557#define UVH_RTC1_INT_CONFIG_STATUS_MASK 0x0000000000001000UL 1884#define UVH_RTC1_INT_CONFIG_APIC_ID_SHFT 32
1558#define UVH_RTC1_INT_CONFIG_P_SHFT 13 1885#define UVH_RTC1_INT_CONFIG_VECTOR_MASK 0x00000000000000ffUL
1559#define UVH_RTC1_INT_CONFIG_P_MASK 0x0000000000002000UL 1886#define UVH_RTC1_INT_CONFIG_DM_MASK 0x0000000000000700UL
1560#define UVH_RTC1_INT_CONFIG_T_SHFT 15 1887#define UVH_RTC1_INT_CONFIG_DESTMODE_MASK 0x0000000000000800UL
1561#define UVH_RTC1_INT_CONFIG_T_MASK 0x0000000000008000UL 1888#define UVH_RTC1_INT_CONFIG_STATUS_MASK 0x0000000000001000UL
1562#define UVH_RTC1_INT_CONFIG_M_SHFT 16 1889#define UVH_RTC1_INT_CONFIG_P_MASK 0x0000000000002000UL
1563#define UVH_RTC1_INT_CONFIG_M_MASK 0x0000000000010000UL 1890#define UVH_RTC1_INT_CONFIG_T_MASK 0x0000000000008000UL
1564#define UVH_RTC1_INT_CONFIG_APIC_ID_SHFT 32 1891#define UVH_RTC1_INT_CONFIG_M_MASK 0x0000000000010000UL
1565#define UVH_RTC1_INT_CONFIG_APIC_ID_MASK 0xffffffff00000000UL 1892#define UVH_RTC1_INT_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
1566 1893
1567union uvh_rtc1_int_config_u { 1894union uvh_rtc1_int_config_u {
1568 unsigned long v; 1895 unsigned long v;
1569 struct uvh_rtc1_int_config_s { 1896 struct uvh_rtc1_int_config_s {
1570 unsigned long vector_ : 8; /* RW */ 1897 unsigned long vector_:8; /* RW */
1571 unsigned long dm : 3; /* RW */ 1898 unsigned long dm:3; /* RW */
1572 unsigned long destmode : 1; /* RW */ 1899 unsigned long destmode:1; /* RW */
1573 unsigned long status : 1; /* RO */ 1900 unsigned long status:1; /* RO */
1574 unsigned long p : 1; /* RO */ 1901 unsigned long p:1; /* RO */
1575 unsigned long rsvd_14 : 1; /* */ 1902 unsigned long rsvd_14:1;
1576 unsigned long t : 1; /* RO */ 1903 unsigned long t:1; /* RO */
1577 unsigned long m : 1; /* RW */ 1904 unsigned long m:1; /* RW */
1578 unsigned long rsvd_17_31: 15; /* */ 1905 unsigned long rsvd_17_31:15;
1579 unsigned long apic_id : 32; /* RW */ 1906 unsigned long apic_id:32; /* RW */
1580 } s; 1907 } s;
1581}; 1908};
1582 1909
1583/* ========================================================================= */ 1910/* ========================================================================= */
1584/* UVH_SCRATCH5 */ 1911/* UVH_SCRATCH5 */
1585/* ========================================================================= */ 1912/* ========================================================================= */
1586#define UVH_SCRATCH5 0x2d0200UL 1913#define UVH_SCRATCH5 0x2d0200UL
1587#define UVH_SCRATCH5_32 0x778 1914#define UVH_SCRATCH5_32 0x778
1588 1915
1589#define UVH_SCRATCH5_SCRATCH5_SHFT 0 1916#define UVH_SCRATCH5_SCRATCH5_SHFT 0
1590#define UVH_SCRATCH5_SCRATCH5_MASK 0xffffffffffffffffUL 1917#define UVH_SCRATCH5_SCRATCH5_MASK 0xffffffffffffffffUL
1591 1918
1592union uvh_scratch5_u { 1919union uvh_scratch5_u {
1593 unsigned long v; 1920 unsigned long v;
1594 struct uvh_scratch5_s { 1921 struct uvh_scratch5_s {
1595 unsigned long scratch5 : 64; /* RW, W1CS */ 1922 unsigned long scratch5:64; /* RW, W1CS */
1596 } s; 1923 } s;
1597}; 1924};
1598 1925
1599/* ========================================================================= */ 1926/* ========================================================================= */
1600/* UV2H_EVENT_OCCURRED2 */ 1927/* UV2H_EVENT_OCCURRED2 */
1601/* ========================================================================= */ 1928/* ========================================================================= */
1602#define UV2H_EVENT_OCCURRED2 0x70100UL 1929#define UV2H_EVENT_OCCURRED2 0x70100UL
1603#define UV2H_EVENT_OCCURRED2_32 0xb68 1930#define UV2H_EVENT_OCCURRED2_32 0xb68
1604 1931
1605#define UV2H_EVENT_OCCURRED2_RTC_0_SHFT 0 1932#define UV2H_EVENT_OCCURRED2_RTC_0_SHFT 0
1606#define UV2H_EVENT_OCCURRED2_RTC_0_MASK 0x0000000000000001UL 1933#define UV2H_EVENT_OCCURRED2_RTC_1_SHFT 1
1607#define UV2H_EVENT_OCCURRED2_RTC_1_SHFT 1 1934#define UV2H_EVENT_OCCURRED2_RTC_2_SHFT 2
1608#define UV2H_EVENT_OCCURRED2_RTC_1_MASK 0x0000000000000002UL 1935#define UV2H_EVENT_OCCURRED2_RTC_3_SHFT 3
1609#define UV2H_EVENT_OCCURRED2_RTC_2_SHFT 2 1936#define UV2H_EVENT_OCCURRED2_RTC_4_SHFT 4
1610#define UV2H_EVENT_OCCURRED2_RTC_2_MASK 0x0000000000000004UL 1937#define UV2H_EVENT_OCCURRED2_RTC_5_SHFT 5
1611#define UV2H_EVENT_OCCURRED2_RTC_3_SHFT 3 1938#define UV2H_EVENT_OCCURRED2_RTC_6_SHFT 6
1612#define UV2H_EVENT_OCCURRED2_RTC_3_MASK 0x0000000000000008UL 1939#define UV2H_EVENT_OCCURRED2_RTC_7_SHFT 7
1613#define UV2H_EVENT_OCCURRED2_RTC_4_SHFT 4 1940#define UV2H_EVENT_OCCURRED2_RTC_8_SHFT 8
1614#define UV2H_EVENT_OCCURRED2_RTC_4_MASK 0x0000000000000010UL 1941#define UV2H_EVENT_OCCURRED2_RTC_9_SHFT 9
1615#define UV2H_EVENT_OCCURRED2_RTC_5_SHFT 5 1942#define UV2H_EVENT_OCCURRED2_RTC_10_SHFT 10
1616#define UV2H_EVENT_OCCURRED2_RTC_5_MASK 0x0000000000000020UL 1943#define UV2H_EVENT_OCCURRED2_RTC_11_SHFT 11
1617#define UV2H_EVENT_OCCURRED2_RTC_6_SHFT 6 1944#define UV2H_EVENT_OCCURRED2_RTC_12_SHFT 12
1618#define UV2H_EVENT_OCCURRED2_RTC_6_MASK 0x0000000000000040UL 1945#define UV2H_EVENT_OCCURRED2_RTC_13_SHFT 13
1619#define UV2H_EVENT_OCCURRED2_RTC_7_SHFT 7 1946#define UV2H_EVENT_OCCURRED2_RTC_14_SHFT 14
1620#define UV2H_EVENT_OCCURRED2_RTC_7_MASK 0x0000000000000080UL 1947#define UV2H_EVENT_OCCURRED2_RTC_15_SHFT 15
1621#define UV2H_EVENT_OCCURRED2_RTC_8_SHFT 8 1948#define UV2H_EVENT_OCCURRED2_RTC_16_SHFT 16
1622#define UV2H_EVENT_OCCURRED2_RTC_8_MASK 0x0000000000000100UL 1949#define UV2H_EVENT_OCCURRED2_RTC_17_SHFT 17
1623#define UV2H_EVENT_OCCURRED2_RTC_9_SHFT 9 1950#define UV2H_EVENT_OCCURRED2_RTC_18_SHFT 18
1624#define UV2H_EVENT_OCCURRED2_RTC_9_MASK 0x0000000000000200UL 1951#define UV2H_EVENT_OCCURRED2_RTC_19_SHFT 19
1625#define UV2H_EVENT_OCCURRED2_RTC_10_SHFT 10 1952#define UV2H_EVENT_OCCURRED2_RTC_20_SHFT 20
1626#define UV2H_EVENT_OCCURRED2_RTC_10_MASK 0x0000000000000400UL 1953#define UV2H_EVENT_OCCURRED2_RTC_21_SHFT 21
1627#define UV2H_EVENT_OCCURRED2_RTC_11_SHFT 11 1954#define UV2H_EVENT_OCCURRED2_RTC_22_SHFT 22
1628#define UV2H_EVENT_OCCURRED2_RTC_11_MASK 0x0000000000000800UL 1955#define UV2H_EVENT_OCCURRED2_RTC_23_SHFT 23
1629#define UV2H_EVENT_OCCURRED2_RTC_12_SHFT 12 1956#define UV2H_EVENT_OCCURRED2_RTC_24_SHFT 24
1630#define UV2H_EVENT_OCCURRED2_RTC_12_MASK 0x0000000000001000UL 1957#define UV2H_EVENT_OCCURRED2_RTC_25_SHFT 25
1631#define UV2H_EVENT_OCCURRED2_RTC_13_SHFT 13 1958#define UV2H_EVENT_OCCURRED2_RTC_26_SHFT 26
1632#define UV2H_EVENT_OCCURRED2_RTC_13_MASK 0x0000000000002000UL 1959#define UV2H_EVENT_OCCURRED2_RTC_27_SHFT 27
1633#define UV2H_EVENT_OCCURRED2_RTC_14_SHFT 14 1960#define UV2H_EVENT_OCCURRED2_RTC_28_SHFT 28
1634#define UV2H_EVENT_OCCURRED2_RTC_14_MASK 0x0000000000004000UL 1961#define UV2H_EVENT_OCCURRED2_RTC_29_SHFT 29
1635#define UV2H_EVENT_OCCURRED2_RTC_15_SHFT 15 1962#define UV2H_EVENT_OCCURRED2_RTC_30_SHFT 30
1636#define UV2H_EVENT_OCCURRED2_RTC_15_MASK 0x0000000000008000UL 1963#define UV2H_EVENT_OCCURRED2_RTC_31_SHFT 31
1637#define UV2H_EVENT_OCCURRED2_RTC_16_SHFT 16 1964#define UV2H_EVENT_OCCURRED2_RTC_0_MASK 0x0000000000000001UL
1638#define UV2H_EVENT_OCCURRED2_RTC_16_MASK 0x0000000000010000UL 1965#define UV2H_EVENT_OCCURRED2_RTC_1_MASK 0x0000000000000002UL
1639#define UV2H_EVENT_OCCURRED2_RTC_17_SHFT 17 1966#define UV2H_EVENT_OCCURRED2_RTC_2_MASK 0x0000000000000004UL
1640#define UV2H_EVENT_OCCURRED2_RTC_17_MASK 0x0000000000020000UL 1967#define UV2H_EVENT_OCCURRED2_RTC_3_MASK 0x0000000000000008UL
1641#define UV2H_EVENT_OCCURRED2_RTC_18_SHFT 18 1968#define UV2H_EVENT_OCCURRED2_RTC_4_MASK 0x0000000000000010UL
1642#define UV2H_EVENT_OCCURRED2_RTC_18_MASK 0x0000000000040000UL 1969#define UV2H_EVENT_OCCURRED2_RTC_5_MASK 0x0000000000000020UL
1643#define UV2H_EVENT_OCCURRED2_RTC_19_SHFT 19 1970#define UV2H_EVENT_OCCURRED2_RTC_6_MASK 0x0000000000000040UL
1644#define UV2H_EVENT_OCCURRED2_RTC_19_MASK 0x0000000000080000UL 1971#define UV2H_EVENT_OCCURRED2_RTC_7_MASK 0x0000000000000080UL
1645#define UV2H_EVENT_OCCURRED2_RTC_20_SHFT 20 1972#define UV2H_EVENT_OCCURRED2_RTC_8_MASK 0x0000000000000100UL
1646#define UV2H_EVENT_OCCURRED2_RTC_20_MASK 0x0000000000100000UL 1973#define UV2H_EVENT_OCCURRED2_RTC_9_MASK 0x0000000000000200UL
1647#define UV2H_EVENT_OCCURRED2_RTC_21_SHFT 21 1974#define UV2H_EVENT_OCCURRED2_RTC_10_MASK 0x0000000000000400UL
1648#define UV2H_EVENT_OCCURRED2_RTC_21_MASK 0x0000000000200000UL 1975#define UV2H_EVENT_OCCURRED2_RTC_11_MASK 0x0000000000000800UL
1649#define UV2H_EVENT_OCCURRED2_RTC_22_SHFT 22 1976#define UV2H_EVENT_OCCURRED2_RTC_12_MASK 0x0000000000001000UL
1650#define UV2H_EVENT_OCCURRED2_RTC_22_MASK 0x0000000000400000UL 1977#define UV2H_EVENT_OCCURRED2_RTC_13_MASK 0x0000000000002000UL
1651#define UV2H_EVENT_OCCURRED2_RTC_23_SHFT 23 1978#define UV2H_EVENT_OCCURRED2_RTC_14_MASK 0x0000000000004000UL
1652#define UV2H_EVENT_OCCURRED2_RTC_23_MASK 0x0000000000800000UL 1979#define UV2H_EVENT_OCCURRED2_RTC_15_MASK 0x0000000000008000UL
1653#define UV2H_EVENT_OCCURRED2_RTC_24_SHFT 24 1980#define UV2H_EVENT_OCCURRED2_RTC_16_MASK 0x0000000000010000UL
1654#define UV2H_EVENT_OCCURRED2_RTC_24_MASK 0x0000000001000000UL 1981#define UV2H_EVENT_OCCURRED2_RTC_17_MASK 0x0000000000020000UL
1655#define UV2H_EVENT_OCCURRED2_RTC_25_SHFT 25 1982#define UV2H_EVENT_OCCURRED2_RTC_18_MASK 0x0000000000040000UL
1656#define UV2H_EVENT_OCCURRED2_RTC_25_MASK 0x0000000002000000UL 1983#define UV2H_EVENT_OCCURRED2_RTC_19_MASK 0x0000000000080000UL
1657#define UV2H_EVENT_OCCURRED2_RTC_26_SHFT 26 1984#define UV2H_EVENT_OCCURRED2_RTC_20_MASK 0x0000000000100000UL
1658#define UV2H_EVENT_OCCURRED2_RTC_26_MASK 0x0000000004000000UL 1985#define UV2H_EVENT_OCCURRED2_RTC_21_MASK 0x0000000000200000UL
1659#define UV2H_EVENT_OCCURRED2_RTC_27_SHFT 27 1986#define UV2H_EVENT_OCCURRED2_RTC_22_MASK 0x0000000000400000UL
1660#define UV2H_EVENT_OCCURRED2_RTC_27_MASK 0x0000000008000000UL 1987#define UV2H_EVENT_OCCURRED2_RTC_23_MASK 0x0000000000800000UL
1661#define UV2H_EVENT_OCCURRED2_RTC_28_SHFT 28 1988#define UV2H_EVENT_OCCURRED2_RTC_24_MASK 0x0000000001000000UL
1662#define UV2H_EVENT_OCCURRED2_RTC_28_MASK 0x0000000010000000UL 1989#define UV2H_EVENT_OCCURRED2_RTC_25_MASK 0x0000000002000000UL
1663#define UV2H_EVENT_OCCURRED2_RTC_29_SHFT 29 1990#define UV2H_EVENT_OCCURRED2_RTC_26_MASK 0x0000000004000000UL
1664#define UV2H_EVENT_OCCURRED2_RTC_29_MASK 0x0000000020000000UL 1991#define UV2H_EVENT_OCCURRED2_RTC_27_MASK 0x0000000008000000UL
1665#define UV2H_EVENT_OCCURRED2_RTC_30_SHFT 30 1992#define UV2H_EVENT_OCCURRED2_RTC_28_MASK 0x0000000010000000UL
1666#define UV2H_EVENT_OCCURRED2_RTC_30_MASK 0x0000000040000000UL 1993#define UV2H_EVENT_OCCURRED2_RTC_29_MASK 0x0000000020000000UL
1667#define UV2H_EVENT_OCCURRED2_RTC_31_SHFT 31 1994#define UV2H_EVENT_OCCURRED2_RTC_30_MASK 0x0000000040000000UL
1668#define UV2H_EVENT_OCCURRED2_RTC_31_MASK 0x0000000080000000UL 1995#define UV2H_EVENT_OCCURRED2_RTC_31_MASK 0x0000000080000000UL
1669 1996
1670union uv2h_event_occurred2_u { 1997union uv2h_event_occurred2_u {
1671 unsigned long v; 1998 unsigned long v;
1672 struct uv2h_event_occurred2_s { 1999 struct uv2h_event_occurred2_s {
1673 unsigned long rtc_0 : 1; /* RW */ 2000 unsigned long rtc_0:1; /* RW */
1674 unsigned long rtc_1 : 1; /* RW */ 2001 unsigned long rtc_1:1; /* RW */
1675 unsigned long rtc_2 : 1; /* RW */ 2002 unsigned long rtc_2:1; /* RW */
1676 unsigned long rtc_3 : 1; /* RW */ 2003 unsigned long rtc_3:1; /* RW */
1677 unsigned long rtc_4 : 1; /* RW */ 2004 unsigned long rtc_4:1; /* RW */
1678 unsigned long rtc_5 : 1; /* RW */ 2005 unsigned long rtc_5:1; /* RW */
1679 unsigned long rtc_6 : 1; /* RW */ 2006 unsigned long rtc_6:1; /* RW */
1680 unsigned long rtc_7 : 1; /* RW */ 2007 unsigned long rtc_7:1; /* RW */
1681 unsigned long rtc_8 : 1; /* RW */ 2008 unsigned long rtc_8:1; /* RW */
1682 unsigned long rtc_9 : 1; /* RW */ 2009 unsigned long rtc_9:1; /* RW */
1683 unsigned long rtc_10 : 1; /* RW */ 2010 unsigned long rtc_10:1; /* RW */
1684 unsigned long rtc_11 : 1; /* RW */ 2011 unsigned long rtc_11:1; /* RW */
1685 unsigned long rtc_12 : 1; /* RW */ 2012 unsigned long rtc_12:1; /* RW */
1686 unsigned long rtc_13 : 1; /* RW */ 2013 unsigned long rtc_13:1; /* RW */
1687 unsigned long rtc_14 : 1; /* RW */ 2014 unsigned long rtc_14:1; /* RW */
1688 unsigned long rtc_15 : 1; /* RW */ 2015 unsigned long rtc_15:1; /* RW */
1689 unsigned long rtc_16 : 1; /* RW */ 2016 unsigned long rtc_16:1; /* RW */
1690 unsigned long rtc_17 : 1; /* RW */ 2017 unsigned long rtc_17:1; /* RW */
1691 unsigned long rtc_18 : 1; /* RW */ 2018 unsigned long rtc_18:1; /* RW */
1692 unsigned long rtc_19 : 1; /* RW */ 2019 unsigned long rtc_19:1; /* RW */
1693 unsigned long rtc_20 : 1; /* RW */ 2020 unsigned long rtc_20:1; /* RW */
1694 unsigned long rtc_21 : 1; /* RW */ 2021 unsigned long rtc_21:1; /* RW */
1695 unsigned long rtc_22 : 1; /* RW */ 2022 unsigned long rtc_22:1; /* RW */
1696 unsigned long rtc_23 : 1; /* RW */ 2023 unsigned long rtc_23:1; /* RW */
1697 unsigned long rtc_24 : 1; /* RW */ 2024 unsigned long rtc_24:1; /* RW */
1698 unsigned long rtc_25 : 1; /* RW */ 2025 unsigned long rtc_25:1; /* RW */
1699 unsigned long rtc_26 : 1; /* RW */ 2026 unsigned long rtc_26:1; /* RW */
1700 unsigned long rtc_27 : 1; /* RW */ 2027 unsigned long rtc_27:1; /* RW */
1701 unsigned long rtc_28 : 1; /* RW */ 2028 unsigned long rtc_28:1; /* RW */
1702 unsigned long rtc_29 : 1; /* RW */ 2029 unsigned long rtc_29:1; /* RW */
1703 unsigned long rtc_30 : 1; /* RW */ 2030 unsigned long rtc_30:1; /* RW */
1704 unsigned long rtc_31 : 1; /* RW */ 2031 unsigned long rtc_31:1; /* RW */
1705 unsigned long rsvd_32_63: 32; /* */ 2032 unsigned long rsvd_32_63:32;
1706 } s1; 2033 } s1;
1707}; 2034};
1708 2035
1709/* ========================================================================= */ 2036/* ========================================================================= */
1710/* UV2H_EVENT_OCCURRED2_ALIAS */ 2037/* UV2H_EVENT_OCCURRED2_ALIAS */
1711/* ========================================================================= */ 2038/* ========================================================================= */
1712#define UV2H_EVENT_OCCURRED2_ALIAS 0x70108UL 2039#define UV2H_EVENT_OCCURRED2_ALIAS 0x70108UL
1713#define UV2H_EVENT_OCCURRED2_ALIAS_32 0xb70 2040#define UV2H_EVENT_OCCURRED2_ALIAS_32 0xb70
1714 2041
1715/* ========================================================================= */ 2042/* ========================================================================= */
1716/* UV2H_LB_BAU_SB_ACTIVATION_STATUS_2 */ 2043/* UV2H_LB_BAU_SB_ACTIVATION_STATUS_2 */
1717/* ========================================================================= */ 2044/* ========================================================================= */
1718#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2 0x320130UL 2045#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2 0x320130UL
1719#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_32 0x9f0 2046#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_32 0x9f0
1720 2047
1721#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_SHFT 0 2048#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_SHFT 0
1722#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_MASK 0xffffffffffffffffUL 2049#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_MASK 0xffffffffffffffffUL
1723 2050
1724union uv2h_lb_bau_sb_activation_status_2_u { 2051union uv2h_lb_bau_sb_activation_status_2_u {
1725 unsigned long v; 2052 unsigned long v;
1726 struct uv2h_lb_bau_sb_activation_status_2_s { 2053 struct uv2h_lb_bau_sb_activation_status_2_s {
1727 unsigned long aux_error : 64; /* RW */ 2054 unsigned long aux_error:64; /* RW */
1728 } s1; 2055 } s1;
1729}; 2056};
1730 2057
1731/* ========================================================================= */ 2058/* ========================================================================= */
1732/* UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK */ 2059/* UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK */
1733/* ========================================================================= */ 2060/* ========================================================================= */
1734#define UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK 0x320130UL 2061#define UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK 0x320130UL
1735#define UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK_32 0x9f0 2062#define UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK_32 0x9f0
1736 2063
1737#define UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK_BIT_ENABLES_SHFT 0 2064#define UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK_BIT_ENABLES_SHFT 0
1738#define UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK_BIT_ENABLES_MASK 0x00000000ffffffffUL 2065#define UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK_BIT_ENABLES_MASK 0x00000000ffffffffUL
1739 2066
1740union uv1h_lb_target_physical_apic_id_mask_u { 2067union uv1h_lb_target_physical_apic_id_mask_u {
1741 unsigned long v; 2068 unsigned long v;
1742 struct uv1h_lb_target_physical_apic_id_mask_s { 2069 struct uv1h_lb_target_physical_apic_id_mask_s {
1743 unsigned long bit_enables : 32; /* RW */ 2070 unsigned long bit_enables:32; /* RW */
1744 unsigned long rsvd_32_63 : 32; /* */ 2071 unsigned long rsvd_32_63:32;
1745 } s1; 2072 } s1;
1746}; 2073};
1747 2074
1748 2075
1749#endif /* __ASM_UV_MMRS_X86_H__ */ 2076#endif /* _ASM_X86_UV_UV_MMRS_H */
diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
index 646b4c1ca695..815285bcaceb 100644
--- a/arch/x86/include/asm/vgtod.h
+++ b/arch/x86/include/asm/vgtod.h
@@ -11,10 +11,9 @@ struct vsyscall_gtod_data {
11 time_t wall_time_sec; 11 time_t wall_time_sec;
12 u32 wall_time_nsec; 12 u32 wall_time_nsec;
13 13
14 int sysctl_enabled;
15 struct timezone sys_tz; 14 struct timezone sys_tz;
16 struct { /* extract of a clocksource struct */ 15 struct { /* extract of a clocksource struct */
17 cycle_t (*vread)(void); 16 int vclock_mode;
18 cycle_t cycle_last; 17 cycle_t cycle_last;
19 cycle_t mask; 18 cycle_t mask;
20 u32 mult; 19 u32 mult;
diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
index d55597351f6a..60107072c28b 100644
--- a/arch/x86/include/asm/vsyscall.h
+++ b/arch/x86/include/asm/vsyscall.h
@@ -16,10 +16,6 @@ enum vsyscall_num {
16#ifdef __KERNEL__ 16#ifdef __KERNEL__
17#include <linux/seqlock.h> 17#include <linux/seqlock.h>
18 18
19/* Definitions for CONFIG_GENERIC_TIME definitions */
20#define __vsyscall_fn \
21 __attribute__ ((unused, __section__(".vsyscall_fn"))) notrace
22
23#define VGETCPU_RDTSCP 1 19#define VGETCPU_RDTSCP 1
24#define VGETCPU_LSL 2 20#define VGETCPU_LSL 2
25 21
diff --git a/arch/x86/include/asm/vvar.h b/arch/x86/include/asm/vvar.h
index 341b3559452b..de656ac2af41 100644
--- a/arch/x86/include/asm/vvar.h
+++ b/arch/x86/include/asm/vvar.h
@@ -10,15 +10,14 @@
10 * In normal kernel code, they are used like any other variable. 10 * In normal kernel code, they are used like any other variable.
11 * In user code, they are accessed through the VVAR macro. 11 * In user code, they are accessed through the VVAR macro.
12 * 12 *
13 * Each of these variables lives in the vsyscall page, and each 13 * These variables live in a page of kernel data that has an extra RO
14 * one needs a unique offset within the little piece of the page 14 * mapping for userspace. Each variable needs a unique offset within
15 * reserved for vvars. Specify that offset in DECLARE_VVAR. 15 * that page; specify that offset with the DECLARE_VVAR macro. (If
16 * (There are 896 bytes available. If you mess up, the linker will 16 * you mess up, the linker will catch it.)
17 * catch it.)
18 */ 17 */
19 18
20/* Offset of vars within vsyscall page */ 19/* Base address of vvars. This is not ABI. */
21#define VSYSCALL_VARS_OFFSET (3072 + 128) 20#define VVAR_ADDRESS (-10*1024*1024 - 4096)
22 21
23#if defined(__VVAR_KERNEL_LDS) 22#if defined(__VVAR_KERNEL_LDS)
24 23
@@ -26,17 +25,17 @@
26 * right place. 25 * right place.
27 */ 26 */
28#define DECLARE_VVAR(offset, type, name) \ 27#define DECLARE_VVAR(offset, type, name) \
29 EMIT_VVAR(name, VSYSCALL_VARS_OFFSET + offset) 28 EMIT_VVAR(name, offset)
30 29
31#else 30#else
32 31
33#define DECLARE_VVAR(offset, type, name) \ 32#define DECLARE_VVAR(offset, type, name) \
34 static type const * const vvaraddr_ ## name = \ 33 static type const * const vvaraddr_ ## name = \
35 (void *)(VSYSCALL_START + VSYSCALL_VARS_OFFSET + (offset)); 34 (void *)(VVAR_ADDRESS + (offset));
36 35
37#define DEFINE_VVAR(type, name) \ 36#define DEFINE_VVAR(type, name) \
38 type __vvar_ ## name \ 37 type name \
39 __attribute__((section(".vsyscall_var_" #name), aligned(16))) 38 __attribute__((section(".vvar_" #name), aligned(16)))
40 39
41#define VVAR(name) (*vvaraddr_ ## name) 40#define VVAR(name) (*vvaraddr_ ## name)
42 41
@@ -45,8 +44,7 @@
45/* DECLARE_VVAR(offset, type, name) */ 44/* DECLARE_VVAR(offset, type, name) */
46 45
47DECLARE_VVAR(0, volatile unsigned long, jiffies) 46DECLARE_VVAR(0, volatile unsigned long, jiffies)
48DECLARE_VVAR(8, int, vgetcpu_mode) 47DECLARE_VVAR(16, int, vgetcpu_mode)
49DECLARE_VVAR(128, struct vsyscall_gtod_data, vsyscall_gtod_data) 48DECLARE_VVAR(128, struct vsyscall_gtod_data, vsyscall_gtod_data)
50 49
51#undef DECLARE_VVAR 50#undef DECLARE_VVAR
52#undef VSYSCALL_VARS_OFFSET
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index d240ea950519..417777de5a40 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -39,6 +39,8 @@
39#include <linux/string.h> 39#include <linux/string.h>
40#include <linux/types.h> 40#include <linux/types.h>
41 41
42#include <trace/events/xen.h>
43
42#include <asm/page.h> 44#include <asm/page.h>
43#include <asm/pgtable.h> 45#include <asm/pgtable.h>
44 46
@@ -459,6 +461,8 @@ MULTI_fpu_taskswitch(struct multicall_entry *mcl, int set)
459{ 461{
460 mcl->op = __HYPERVISOR_fpu_taskswitch; 462 mcl->op = __HYPERVISOR_fpu_taskswitch;
461 mcl->args[0] = set; 463 mcl->args[0] = set;
464
465 trace_xen_mc_entry(mcl, 1);
462} 466}
463 467
464static inline void 468static inline void
@@ -475,6 +479,8 @@ MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va,
475 mcl->args[2] = new_val.pte >> 32; 479 mcl->args[2] = new_val.pte >> 32;
476 mcl->args[3] = flags; 480 mcl->args[3] = flags;
477 } 481 }
482
483 trace_xen_mc_entry(mcl, sizeof(new_val) == sizeof(long) ? 3 : 4);
478} 484}
479 485
480static inline void 486static inline void
@@ -485,6 +491,8 @@ MULTI_grant_table_op(struct multicall_entry *mcl, unsigned int cmd,
485 mcl->args[0] = cmd; 491 mcl->args[0] = cmd;
486 mcl->args[1] = (unsigned long)uop; 492 mcl->args[1] = (unsigned long)uop;
487 mcl->args[2] = count; 493 mcl->args[2] = count;
494
495 trace_xen_mc_entry(mcl, 3);
488} 496}
489 497
490static inline void 498static inline void
@@ -504,6 +512,8 @@ MULTI_update_va_mapping_otherdomain(struct multicall_entry *mcl, unsigned long v
504 mcl->args[3] = flags; 512 mcl->args[3] = flags;
505 mcl->args[4] = domid; 513 mcl->args[4] = domid;
506 } 514 }
515
516 trace_xen_mc_entry(mcl, sizeof(new_val) == sizeof(long) ? 4 : 5);
507} 517}
508 518
509static inline void 519static inline void
@@ -520,6 +530,8 @@ MULTI_update_descriptor(struct multicall_entry *mcl, u64 maddr,
520 mcl->args[2] = desc.a; 530 mcl->args[2] = desc.a;
521 mcl->args[3] = desc.b; 531 mcl->args[3] = desc.b;
522 } 532 }
533
534 trace_xen_mc_entry(mcl, sizeof(maddr) == sizeof(long) ? 2 : 4);
523} 535}
524 536
525static inline void 537static inline void
@@ -528,6 +540,8 @@ MULTI_memory_op(struct multicall_entry *mcl, unsigned int cmd, void *arg)
528 mcl->op = __HYPERVISOR_memory_op; 540 mcl->op = __HYPERVISOR_memory_op;
529 mcl->args[0] = cmd; 541 mcl->args[0] = cmd;
530 mcl->args[1] = (unsigned long)arg; 542 mcl->args[1] = (unsigned long)arg;
543
544 trace_xen_mc_entry(mcl, 2);
531} 545}
532 546
533static inline void 547static inline void
@@ -539,6 +553,8 @@ MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
539 mcl->args[1] = count; 553 mcl->args[1] = count;
540 mcl->args[2] = (unsigned long)success_count; 554 mcl->args[2] = (unsigned long)success_count;
541 mcl->args[3] = domid; 555 mcl->args[3] = domid;
556
557 trace_xen_mc_entry(mcl, 4);
542} 558}
543 559
544static inline void 560static inline void
@@ -550,6 +566,8 @@ MULTI_mmuext_op(struct multicall_entry *mcl, struct mmuext_op *op, int count,
550 mcl->args[1] = count; 566 mcl->args[1] = count;
551 mcl->args[2] = (unsigned long)success_count; 567 mcl->args[2] = (unsigned long)success_count;
552 mcl->args[3] = domid; 568 mcl->args[3] = domid;
569
570 trace_xen_mc_entry(mcl, 4);
553} 571}
554 572
555static inline void 573static inline void
@@ -558,6 +576,8 @@ MULTI_set_gdt(struct multicall_entry *mcl, unsigned long *frames, int entries)
558 mcl->op = __HYPERVISOR_set_gdt; 576 mcl->op = __HYPERVISOR_set_gdt;
559 mcl->args[0] = (unsigned long)frames; 577 mcl->args[0] = (unsigned long)frames;
560 mcl->args[1] = entries; 578 mcl->args[1] = entries;
579
580 trace_xen_mc_entry(mcl, 2);
561} 581}
562 582
563static inline void 583static inline void
@@ -567,6 +587,8 @@ MULTI_stack_switch(struct multicall_entry *mcl,
567 mcl->op = __HYPERVISOR_stack_switch; 587 mcl->op = __HYPERVISOR_stack_switch;
568 mcl->args[0] = ss; 588 mcl->args[0] = ss;
569 mcl->args[1] = esp; 589 mcl->args[1] = esp;
590
591 trace_xen_mc_entry(mcl, 2);
570} 592}
571 593
572#endif /* _ASM_X86_XEN_HYPERCALL_H */ 594#endif /* _ASM_X86_XEN_HYPERCALL_H */
diff --git a/arch/x86/include/asm/xen/pci.h b/arch/x86/include/asm/xen/pci.h
index 4fbda9a3f339..968d57dd54c9 100644
--- a/arch/x86/include/asm/xen/pci.h
+++ b/arch/x86/include/asm/xen/pci.h
@@ -14,13 +14,14 @@ static inline int pci_xen_hvm_init(void)
14} 14}
15#endif 15#endif
16#if defined(CONFIG_XEN_DOM0) 16#if defined(CONFIG_XEN_DOM0)
17void __init xen_setup_pirqs(void); 17int __init pci_xen_initial_domain(void);
18int xen_find_device_domain_owner(struct pci_dev *dev); 18int xen_find_device_domain_owner(struct pci_dev *dev);
19int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain); 19int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain);
20int xen_unregister_device_domain_owner(struct pci_dev *dev); 20int xen_unregister_device_domain_owner(struct pci_dev *dev);
21#else 21#else
22static inline void __init xen_setup_pirqs(void) 22static inline int __init pci_xen_initial_domain(void)
23{ 23{
24 return -1;
24} 25}
25static inline int xen_find_device_domain_owner(struct pci_dev *dev) 26static inline int xen_find_device_domain_owner(struct pci_dev *dev)
26{ 27{
diff --git a/arch/x86/include/asm/xen/trace_types.h b/arch/x86/include/asm/xen/trace_types.h
new file mode 100644
index 000000000000..21e1874c0a0b
--- /dev/null
+++ b/arch/x86/include/asm/xen/trace_types.h
@@ -0,0 +1,18 @@
1#ifndef _ASM_XEN_TRACE_TYPES_H
2#define _ASM_XEN_TRACE_TYPES_H
3
4enum xen_mc_flush_reason {
5 XEN_MC_FL_NONE, /* explicit flush */
6 XEN_MC_FL_BATCH, /* out of hypercall space */
7 XEN_MC_FL_ARGS, /* out of argument space */
8 XEN_MC_FL_CALLBACK, /* out of callback space */
9};
10
11enum xen_mc_extend_args {
12 XEN_MC_XE_OK,
13 XEN_MC_XE_BAD_OP,
14 XEN_MC_XE_NO_SPACE
15};
16typedef void (*xen_mc_callback_fn_t)(void *);
17
18#endif /* _ASM_XEN_TRACE_TYPES_H */
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 90b06d4daee2..04105574c8e9 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -24,17 +24,12 @@ endif
24nostackp := $(call cc-option, -fno-stack-protector) 24nostackp := $(call cc-option, -fno-stack-protector)
25CFLAGS_vsyscall_64.o := $(PROFILING) -g0 $(nostackp) 25CFLAGS_vsyscall_64.o := $(PROFILING) -g0 $(nostackp)
26CFLAGS_hpet.o := $(nostackp) 26CFLAGS_hpet.o := $(nostackp)
27CFLAGS_vread_tsc_64.o := $(nostackp)
28CFLAGS_paravirt.o := $(nostackp) 27CFLAGS_paravirt.o := $(nostackp)
29GCOV_PROFILE_vsyscall_64.o := n 28GCOV_PROFILE_vsyscall_64.o := n
30GCOV_PROFILE_hpet.o := n 29GCOV_PROFILE_hpet.o := n
31GCOV_PROFILE_tsc.o := n 30GCOV_PROFILE_tsc.o := n
32GCOV_PROFILE_vread_tsc_64.o := n
33GCOV_PROFILE_paravirt.o := n 31GCOV_PROFILE_paravirt.o := n
34 32
35# vread_tsc_64 is hot and should be fully optimized:
36CFLAGS_REMOVE_vread_tsc_64.o = -pg -fno-optimize-sibling-calls
37
38obj-y := process_$(BITS).o signal.o entry_$(BITS).o 33obj-y := process_$(BITS).o signal.o entry_$(BITS).o
39obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o 34obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
40obj-y += time.o ioport.o ldt.o dumpstack.o 35obj-y += time.o ioport.o ldt.o dumpstack.o
@@ -43,7 +38,8 @@ obj-$(CONFIG_IRQ_WORK) += irq_work.o
43obj-y += probe_roms.o 38obj-y += probe_roms.o
44obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o 39obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
45obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o 40obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
46obj-$(CONFIG_X86_64) += syscall_64.o vsyscall_64.o vread_tsc_64.o 41obj-$(CONFIG_X86_64) += syscall_64.o vsyscall_64.o
42obj-$(CONFIG_X86_64) += vsyscall_emu_64.o
47obj-y += bootflag.o e820.o 43obj-y += bootflag.o e820.o
48obj-y += pci-dma.o quirks.o topology.o kdebugfs.o 44obj-y += pci-dma.o quirks.o topology.o kdebugfs.o
49obj-y += alternative.o i8253.o pci-nommu.o hw_breakpoint.o 45obj-y += alternative.o i8253.o pci-nommu.o hw_breakpoint.o
@@ -123,7 +119,6 @@ ifeq ($(CONFIG_X86_64),y)
123 119
124 obj-$(CONFIG_GART_IOMMU) += amd_gart_64.o aperture_64.o 120 obj-$(CONFIG_GART_IOMMU) += amd_gart_64.o aperture_64.o
125 obj-$(CONFIG_CALGARY_IOMMU) += pci-calgary_64.o tce_64.o 121 obj-$(CONFIG_CALGARY_IOMMU) += pci-calgary_64.o tce_64.o
126 obj-$(CONFIG_AMD_IOMMU) += amd_iommu_init.o amd_iommu.o
127 122
128 obj-$(CONFIG_PCI_MMCONFIG) += mmconf-fam10h_64.o 123 obj-$(CONFIG_PCI_MMCONFIG) += mmconf-fam10h_64.o
129 obj-y += vsmp_64.o 124 obj-y += vsmp_64.o
diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
index ead21b663117..b4fd836e4053 100644
--- a/arch/x86/kernel/acpi/realmode/wakeup.S
+++ b/arch/x86/kernel/acpi/realmode/wakeup.S
@@ -28,6 +28,8 @@ pmode_cr3: .long 0 /* Saved %cr3 */
28pmode_cr4: .long 0 /* Saved %cr4 */ 28pmode_cr4: .long 0 /* Saved %cr4 */
29pmode_efer: .quad 0 /* Saved EFER */ 29pmode_efer: .quad 0 /* Saved EFER */
30pmode_gdt: .quad 0 30pmode_gdt: .quad 0
31pmode_misc_en: .quad 0 /* Saved MISC_ENABLE MSR */
32pmode_behavior: .long 0 /* Wakeup behavior flags */
31realmode_flags: .long 0 33realmode_flags: .long 0
32real_magic: .long 0 34real_magic: .long 0
33trampoline_segment: .word 0 35trampoline_segment: .word 0
@@ -91,6 +93,18 @@ wakeup_code:
91 /* Call the C code */ 93 /* Call the C code */
92 calll main 94 calll main
93 95
96 /* Restore MISC_ENABLE before entering protected mode, in case
97 BIOS decided to clear XD_DISABLE during S3. */
98 movl pmode_behavior, %eax
99 btl $WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE, %eax
100 jnc 1f
101
102 movl pmode_misc_en, %eax
103 movl pmode_misc_en + 4, %edx
104 movl $MSR_IA32_MISC_ENABLE, %ecx
105 wrmsr
1061:
107
94 /* Do any other stuff... */ 108 /* Do any other stuff... */
95 109
96#ifndef CONFIG_64BIT 110#ifndef CONFIG_64BIT
diff --git a/arch/x86/kernel/acpi/realmode/wakeup.h b/arch/x86/kernel/acpi/realmode/wakeup.h
index e1828c07e79c..97a29e1430e3 100644
--- a/arch/x86/kernel/acpi/realmode/wakeup.h
+++ b/arch/x86/kernel/acpi/realmode/wakeup.h
@@ -21,6 +21,9 @@ struct wakeup_header {
21 u32 pmode_efer_low; /* Protected mode EFER */ 21 u32 pmode_efer_low; /* Protected mode EFER */
22 u32 pmode_efer_high; 22 u32 pmode_efer_high;
23 u64 pmode_gdt; 23 u64 pmode_gdt;
24 u32 pmode_misc_en_low; /* Protected mode MISC_ENABLE */
25 u32 pmode_misc_en_high;
26 u32 pmode_behavior; /* Wakeup routine behavior flags */
24 u32 realmode_flags; 27 u32 realmode_flags;
25 u32 real_magic; 28 u32 real_magic;
26 u16 trampoline_segment; /* segment with trampoline code, 64-bit only */ 29 u16 trampoline_segment; /* segment with trampoline code, 64-bit only */
@@ -39,4 +42,7 @@ extern struct wakeup_header wakeup_header;
39#define WAKEUP_HEADER_SIGNATURE 0x51ee1111 42#define WAKEUP_HEADER_SIGNATURE 0x51ee1111
40#define WAKEUP_END_SIGNATURE 0x65a22c82 43#define WAKEUP_END_SIGNATURE 0x65a22c82
41 44
45/* Wakeup behavior bits */
46#define WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE 0
47
42#endif /* ARCH_X86_KERNEL_ACPI_RM_WAKEUP_H */ 48#endif /* ARCH_X86_KERNEL_ACPI_RM_WAKEUP_H */
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 18a857ba7a25..103b6ab368d3 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -77,6 +77,12 @@ int acpi_suspend_lowlevel(void)
77 77
78 header->pmode_cr0 = read_cr0(); 78 header->pmode_cr0 = read_cr0();
79 header->pmode_cr4 = read_cr4_safe(); 79 header->pmode_cr4 = read_cr4_safe();
80 header->pmode_behavior = 0;
81 if (!rdmsr_safe(MSR_IA32_MISC_ENABLE,
82 &header->pmode_misc_en_low,
83 &header->pmode_misc_en_high))
84 header->pmode_behavior |=
85 (1 << WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE);
80 header->realmode_flags = acpi_realmode_flags; 86 header->realmode_flags = acpi_realmode_flags;
81 header->real_magic = 0x12345678; 87 header->real_magic = 0x12345678;
82 88
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index a81f2d52f869..c63822816249 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -14,7 +14,6 @@
14#include <asm/pgtable.h> 14#include <asm/pgtable.h>
15#include <asm/mce.h> 15#include <asm/mce.h>
16#include <asm/nmi.h> 16#include <asm/nmi.h>
17#include <asm/vsyscall.h>
18#include <asm/cacheflush.h> 17#include <asm/cacheflush.h>
19#include <asm/tlbflush.h> 18#include <asm/tlbflush.h>
20#include <asm/io.h> 19#include <asm/io.h>
@@ -250,7 +249,6 @@ static void __init_or_module add_nops(void *insns, unsigned int len)
250 249
251extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; 250extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
252extern s32 __smp_locks[], __smp_locks_end[]; 251extern s32 __smp_locks[], __smp_locks_end[];
253extern char __vsyscall_0;
254void *text_poke_early(void *addr, const void *opcode, size_t len); 252void *text_poke_early(void *addr, const void *opcode, size_t len);
255 253
256/* Replace instructions with better alternatives for this CPU type. 254/* Replace instructions with better alternatives for this CPU type.
@@ -263,6 +261,7 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
263 struct alt_instr *end) 261 struct alt_instr *end)
264{ 262{
265 struct alt_instr *a; 263 struct alt_instr *a;
264 u8 *instr, *replacement;
266 u8 insnbuf[MAX_PATCH_LEN]; 265 u8 insnbuf[MAX_PATCH_LEN];
267 266
268 DPRINTK("%s: alt table %p -> %p\n", __func__, start, end); 267 DPRINTK("%s: alt table %p -> %p\n", __func__, start, end);
@@ -276,25 +275,23 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
276 * order. 275 * order.
277 */ 276 */
278 for (a = start; a < end; a++) { 277 for (a = start; a < end; a++) {
279 u8 *instr = a->instr; 278 instr = (u8 *)&a->instr_offset + a->instr_offset;
279 replacement = (u8 *)&a->repl_offset + a->repl_offset;
280 BUG_ON(a->replacementlen > a->instrlen); 280 BUG_ON(a->replacementlen > a->instrlen);
281 BUG_ON(a->instrlen > sizeof(insnbuf)); 281 BUG_ON(a->instrlen > sizeof(insnbuf));
282 BUG_ON(a->cpuid >= NCAPINTS*32); 282 BUG_ON(a->cpuid >= NCAPINTS*32);
283 if (!boot_cpu_has(a->cpuid)) 283 if (!boot_cpu_has(a->cpuid))
284 continue; 284 continue;
285#ifdef CONFIG_X86_64 285
286 /* vsyscall code is not mapped yet. resolve it manually. */ 286 memcpy(insnbuf, replacement, a->replacementlen);
287 if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END) { 287
288 instr = __va(instr - (u8*)VSYSCALL_START + (u8*)__pa_symbol(&__vsyscall_0)); 288 /* 0xe8 is a relative jump; fix the offset. */
289 DPRINTK("%s: vsyscall fixup: %p => %p\n",
290 __func__, a->instr, instr);
291 }
292#endif
293 memcpy(insnbuf, a->replacement, a->replacementlen);
294 if (*insnbuf == 0xe8 && a->replacementlen == 5) 289 if (*insnbuf == 0xe8 && a->replacementlen == 5)
295 *(s32 *)(insnbuf + 1) += a->replacement - a->instr; 290 *(s32 *)(insnbuf + 1) += replacement - instr;
291
296 add_nops(insnbuf + a->replacementlen, 292 add_nops(insnbuf + a->replacementlen,
297 a->instrlen - a->replacementlen); 293 a->instrlen - a->replacementlen);
294
298 text_poke_early(instr, insnbuf, a->instrlen); 295 text_poke_early(instr, insnbuf, a->instrlen);
299 } 296 }
300} 297}
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
deleted file mode 100644
index 7c3a95e54ec5..000000000000
--- a/arch/x86/kernel/amd_iommu.c
+++ /dev/null
@@ -1,2764 +0,0 @@
1/*
2 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
3 * Author: Joerg Roedel <joerg.roedel@amd.com>
4 * Leo Duran <leo.duran@amd.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#include <linux/pci.h>
21#include <linux/pci-ats.h>
22#include <linux/bitmap.h>
23#include <linux/slab.h>
24#include <linux/debugfs.h>
25#include <linux/scatterlist.h>
26#include <linux/dma-mapping.h>
27#include <linux/iommu-helper.h>
28#include <linux/iommu.h>
29#include <linux/delay.h>
30#include <asm/proto.h>
31#include <asm/iommu.h>
32#include <asm/gart.h>
33#include <asm/dma.h>
34#include <asm/amd_iommu_proto.h>
35#include <asm/amd_iommu_types.h>
36#include <asm/amd_iommu.h>
37
38#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
39
40#define LOOP_TIMEOUT 100000
41
42static DEFINE_RWLOCK(amd_iommu_devtable_lock);
43
44/* A list of preallocated protection domains */
45static LIST_HEAD(iommu_pd_list);
46static DEFINE_SPINLOCK(iommu_pd_list_lock);
47
48/*
49 * Domain for untranslated devices - only allocated
50 * if iommu=pt passed on kernel cmd line.
51 */
52static struct protection_domain *pt_domain;
53
54static struct iommu_ops amd_iommu_ops;
55
56/*
57 * general struct to manage commands send to an IOMMU
58 */
59struct iommu_cmd {
60 u32 data[4];
61};
62
63static void update_domain(struct protection_domain *domain);
64
65/****************************************************************************
66 *
67 * Helper functions
68 *
69 ****************************************************************************/
70
71static inline u16 get_device_id(struct device *dev)
72{
73 struct pci_dev *pdev = to_pci_dev(dev);
74
75 return calc_devid(pdev->bus->number, pdev->devfn);
76}
77
78static struct iommu_dev_data *get_dev_data(struct device *dev)
79{
80 return dev->archdata.iommu;
81}
82
83/*
84 * In this function the list of preallocated protection domains is traversed to
85 * find the domain for a specific device
86 */
87static struct dma_ops_domain *find_protection_domain(u16 devid)
88{
89 struct dma_ops_domain *entry, *ret = NULL;
90 unsigned long flags;
91 u16 alias = amd_iommu_alias_table[devid];
92
93 if (list_empty(&iommu_pd_list))
94 return NULL;
95
96 spin_lock_irqsave(&iommu_pd_list_lock, flags);
97
98 list_for_each_entry(entry, &iommu_pd_list, list) {
99 if (entry->target_dev == devid ||
100 entry->target_dev == alias) {
101 ret = entry;
102 break;
103 }
104 }
105
106 spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
107
108 return ret;
109}
110
111/*
112 * This function checks if the driver got a valid device from the caller to
113 * avoid dereferencing invalid pointers.
114 */
115static bool check_device(struct device *dev)
116{
117 u16 devid;
118
119 if (!dev || !dev->dma_mask)
120 return false;
121
122 /* No device or no PCI device */
123 if (dev->bus != &pci_bus_type)
124 return false;
125
126 devid = get_device_id(dev);
127
128 /* Out of our scope? */
129 if (devid > amd_iommu_last_bdf)
130 return false;
131
132 if (amd_iommu_rlookup_table[devid] == NULL)
133 return false;
134
135 return true;
136}
137
138static int iommu_init_device(struct device *dev)
139{
140 struct iommu_dev_data *dev_data;
141 struct pci_dev *pdev;
142 u16 devid, alias;
143
144 if (dev->archdata.iommu)
145 return 0;
146
147 dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
148 if (!dev_data)
149 return -ENOMEM;
150
151 dev_data->dev = dev;
152
153 devid = get_device_id(dev);
154 alias = amd_iommu_alias_table[devid];
155 pdev = pci_get_bus_and_slot(PCI_BUS(alias), alias & 0xff);
156 if (pdev)
157 dev_data->alias = &pdev->dev;
158 else {
159 kfree(dev_data);
160 return -ENOTSUPP;
161 }
162
163 atomic_set(&dev_data->bind, 0);
164
165 dev->archdata.iommu = dev_data;
166
167
168 return 0;
169}
170
171static void iommu_ignore_device(struct device *dev)
172{
173 u16 devid, alias;
174
175 devid = get_device_id(dev);
176 alias = amd_iommu_alias_table[devid];
177
178 memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
179 memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));
180
181 amd_iommu_rlookup_table[devid] = NULL;
182 amd_iommu_rlookup_table[alias] = NULL;
183}
184
185static void iommu_uninit_device(struct device *dev)
186{
187 kfree(dev->archdata.iommu);
188}
189
190void __init amd_iommu_uninit_devices(void)
191{
192 struct pci_dev *pdev = NULL;
193
194 for_each_pci_dev(pdev) {
195
196 if (!check_device(&pdev->dev))
197 continue;
198
199 iommu_uninit_device(&pdev->dev);
200 }
201}
202
203int __init amd_iommu_init_devices(void)
204{
205 struct pci_dev *pdev = NULL;
206 int ret = 0;
207
208 for_each_pci_dev(pdev) {
209
210 if (!check_device(&pdev->dev))
211 continue;
212
213 ret = iommu_init_device(&pdev->dev);
214 if (ret == -ENOTSUPP)
215 iommu_ignore_device(&pdev->dev);
216 else if (ret)
217 goto out_free;
218 }
219
220 return 0;
221
222out_free:
223
224 amd_iommu_uninit_devices();
225
226 return ret;
227}
228#ifdef CONFIG_AMD_IOMMU_STATS
229
230/*
231 * Initialization code for statistics collection
232 */
233
234DECLARE_STATS_COUNTER(compl_wait);
235DECLARE_STATS_COUNTER(cnt_map_single);
236DECLARE_STATS_COUNTER(cnt_unmap_single);
237DECLARE_STATS_COUNTER(cnt_map_sg);
238DECLARE_STATS_COUNTER(cnt_unmap_sg);
239DECLARE_STATS_COUNTER(cnt_alloc_coherent);
240DECLARE_STATS_COUNTER(cnt_free_coherent);
241DECLARE_STATS_COUNTER(cross_page);
242DECLARE_STATS_COUNTER(domain_flush_single);
243DECLARE_STATS_COUNTER(domain_flush_all);
244DECLARE_STATS_COUNTER(alloced_io_mem);
245DECLARE_STATS_COUNTER(total_map_requests);
246
247static struct dentry *stats_dir;
248static struct dentry *de_fflush;
249
250static void amd_iommu_stats_add(struct __iommu_counter *cnt)
251{
252 if (stats_dir == NULL)
253 return;
254
255 cnt->dent = debugfs_create_u64(cnt->name, 0444, stats_dir,
256 &cnt->value);
257}
258
259static void amd_iommu_stats_init(void)
260{
261 stats_dir = debugfs_create_dir("amd-iommu", NULL);
262 if (stats_dir == NULL)
263 return;
264
265 de_fflush = debugfs_create_bool("fullflush", 0444, stats_dir,
266 (u32 *)&amd_iommu_unmap_flush);
267
268 amd_iommu_stats_add(&compl_wait);
269 amd_iommu_stats_add(&cnt_map_single);
270 amd_iommu_stats_add(&cnt_unmap_single);
271 amd_iommu_stats_add(&cnt_map_sg);
272 amd_iommu_stats_add(&cnt_unmap_sg);
273 amd_iommu_stats_add(&cnt_alloc_coherent);
274 amd_iommu_stats_add(&cnt_free_coherent);
275 amd_iommu_stats_add(&cross_page);
276 amd_iommu_stats_add(&domain_flush_single);
277 amd_iommu_stats_add(&domain_flush_all);
278 amd_iommu_stats_add(&alloced_io_mem);
279 amd_iommu_stats_add(&total_map_requests);
280}
281
282#endif
283
284/****************************************************************************
285 *
286 * Interrupt handling functions
287 *
288 ****************************************************************************/
289
290static void dump_dte_entry(u16 devid)
291{
292 int i;
293
294 for (i = 0; i < 8; ++i)
295 pr_err("AMD-Vi: DTE[%d]: %08x\n", i,
296 amd_iommu_dev_table[devid].data[i]);
297}
298
299static void dump_command(unsigned long phys_addr)
300{
301 struct iommu_cmd *cmd = phys_to_virt(phys_addr);
302 int i;
303
304 for (i = 0; i < 4; ++i)
305 pr_err("AMD-Vi: CMD[%d]: %08x\n", i, cmd->data[i]);
306}
307
308static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
309{
310 u32 *event = __evt;
311 int type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
312 int devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
313 int domid = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
314 int flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
315 u64 address = (u64)(((u64)event[3]) << 32) | event[2];
316
317 printk(KERN_ERR "AMD-Vi: Event logged [");
318
319 switch (type) {
320 case EVENT_TYPE_ILL_DEV:
321 printk("ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x "
322 "address=0x%016llx flags=0x%04x]\n",
323 PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
324 address, flags);
325 dump_dte_entry(devid);
326 break;
327 case EVENT_TYPE_IO_FAULT:
328 printk("IO_PAGE_FAULT device=%02x:%02x.%x "
329 "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
330 PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
331 domid, address, flags);
332 break;
333 case EVENT_TYPE_DEV_TAB_ERR:
334 printk("DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
335 "address=0x%016llx flags=0x%04x]\n",
336 PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
337 address, flags);
338 break;
339 case EVENT_TYPE_PAGE_TAB_ERR:
340 printk("PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
341 "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
342 PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
343 domid, address, flags);
344 break;
345 case EVENT_TYPE_ILL_CMD:
346 printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address);
347 dump_command(address);
348 break;
349 case EVENT_TYPE_CMD_HARD_ERR:
350 printk("COMMAND_HARDWARE_ERROR address=0x%016llx "
351 "flags=0x%04x]\n", address, flags);
352 break;
353 case EVENT_TYPE_IOTLB_INV_TO:
354 printk("IOTLB_INV_TIMEOUT device=%02x:%02x.%x "
355 "address=0x%016llx]\n",
356 PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
357 address);
358 break;
359 case EVENT_TYPE_INV_DEV_REQ:
360 printk("INVALID_DEVICE_REQUEST device=%02x:%02x.%x "
361 "address=0x%016llx flags=0x%04x]\n",
362 PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
363 address, flags);
364 break;
365 default:
366 printk(KERN_ERR "UNKNOWN type=0x%02x]\n", type);
367 }
368}
369
370static void iommu_poll_events(struct amd_iommu *iommu)
371{
372 u32 head, tail;
373 unsigned long flags;
374
375 spin_lock_irqsave(&iommu->lock, flags);
376
377 head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
378 tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
379
380 while (head != tail) {
381 iommu_print_event(iommu, iommu->evt_buf + head);
382 head = (head + EVENT_ENTRY_SIZE) % iommu->evt_buf_size;
383 }
384
385 writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
386
387 spin_unlock_irqrestore(&iommu->lock, flags);
388}
389
390irqreturn_t amd_iommu_int_thread(int irq, void *data)
391{
392 struct amd_iommu *iommu;
393
394 for_each_iommu(iommu)
395 iommu_poll_events(iommu);
396
397 return IRQ_HANDLED;
398}
399
400irqreturn_t amd_iommu_int_handler(int irq, void *data)
401{
402 return IRQ_WAKE_THREAD;
403}
404
405/****************************************************************************
406 *
407 * IOMMU command queuing functions
408 *
409 ****************************************************************************/
410
411static int wait_on_sem(volatile u64 *sem)
412{
413 int i = 0;
414
415 while (*sem == 0 && i < LOOP_TIMEOUT) {
416 udelay(1);
417 i += 1;
418 }
419
420 if (i == LOOP_TIMEOUT) {
421 pr_alert("AMD-Vi: Completion-Wait loop timed out\n");
422 return -EIO;
423 }
424
425 return 0;
426}
427
428static void copy_cmd_to_buffer(struct amd_iommu *iommu,
429 struct iommu_cmd *cmd,
430 u32 tail)
431{
432 u8 *target;
433
434 target = iommu->cmd_buf + tail;
435 tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;
436
437 /* Copy command to buffer */
438 memcpy(target, cmd, sizeof(*cmd));
439
440 /* Tell the IOMMU about it */
441 writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
442}
443
444static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
445{
446 WARN_ON(address & 0x7ULL);
447
448 memset(cmd, 0, sizeof(*cmd));
449 cmd->data[0] = lower_32_bits(__pa(address)) | CMD_COMPL_WAIT_STORE_MASK;
450 cmd->data[1] = upper_32_bits(__pa(address));
451 cmd->data[2] = 1;
452 CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
453}
454
455static void build_inv_dte(struct iommu_cmd *cmd, u16 devid)
456{
457 memset(cmd, 0, sizeof(*cmd));
458 cmd->data[0] = devid;
459 CMD_SET_TYPE(cmd, CMD_INV_DEV_ENTRY);
460}
461
462static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
463 size_t size, u16 domid, int pde)
464{
465 u64 pages;
466 int s;
467
468 pages = iommu_num_pages(address, size, PAGE_SIZE);
469 s = 0;
470
471 if (pages > 1) {
472 /*
473 * If we have to flush more than one page, flush all
474 * TLB entries for this domain
475 */
476 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
477 s = 1;
478 }
479
480 address &= PAGE_MASK;
481
482 memset(cmd, 0, sizeof(*cmd));
483 cmd->data[1] |= domid;
484 cmd->data[2] = lower_32_bits(address);
485 cmd->data[3] = upper_32_bits(address);
486 CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
487 if (s) /* size bit - we flush more than one 4kb page */
488 cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
489 if (pde) /* PDE bit - we wan't flush everything not only the PTEs */
490 cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
491}
492
493static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
494 u64 address, size_t size)
495{
496 u64 pages;
497 int s;
498
499 pages = iommu_num_pages(address, size, PAGE_SIZE);
500 s = 0;
501
502 if (pages > 1) {
503 /*
504 * If we have to flush more than one page, flush all
505 * TLB entries for this domain
506 */
507 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
508 s = 1;
509 }
510
511 address &= PAGE_MASK;
512
513 memset(cmd, 0, sizeof(*cmd));
514 cmd->data[0] = devid;
515 cmd->data[0] |= (qdep & 0xff) << 24;
516 cmd->data[1] = devid;
517 cmd->data[2] = lower_32_bits(address);
518 cmd->data[3] = upper_32_bits(address);
519 CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
520 if (s)
521 cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
522}
523
524static void build_inv_all(struct iommu_cmd *cmd)
525{
526 memset(cmd, 0, sizeof(*cmd));
527 CMD_SET_TYPE(cmd, CMD_INV_ALL);
528}
529
530/*
531 * Writes the command to the IOMMUs command buffer and informs the
532 * hardware about the new command.
533 */
534static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
535{
536 u32 left, tail, head, next_tail;
537 unsigned long flags;
538
539 WARN_ON(iommu->cmd_buf_size & CMD_BUFFER_UNINITIALIZED);
540
541again:
542 spin_lock_irqsave(&iommu->lock, flags);
543
544 head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
545 tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
546 next_tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;
547 left = (head - next_tail) % iommu->cmd_buf_size;
548
549 if (left <= 2) {
550 struct iommu_cmd sync_cmd;
551 volatile u64 sem = 0;
552 int ret;
553
554 build_completion_wait(&sync_cmd, (u64)&sem);
555 copy_cmd_to_buffer(iommu, &sync_cmd, tail);
556
557 spin_unlock_irqrestore(&iommu->lock, flags);
558
559 if ((ret = wait_on_sem(&sem)) != 0)
560 return ret;
561
562 goto again;
563 }
564
565 copy_cmd_to_buffer(iommu, cmd, tail);
566
567 /* We need to sync now to make sure all commands are processed */
568 iommu->need_sync = true;
569
570 spin_unlock_irqrestore(&iommu->lock, flags);
571
572 return 0;
573}
574
575/*
576 * This function queues a completion wait command into the command
577 * buffer of an IOMMU
578 */
579static int iommu_completion_wait(struct amd_iommu *iommu)
580{
581 struct iommu_cmd cmd;
582 volatile u64 sem = 0;
583 int ret;
584
585 if (!iommu->need_sync)
586 return 0;
587
588 build_completion_wait(&cmd, (u64)&sem);
589
590 ret = iommu_queue_command(iommu, &cmd);
591 if (ret)
592 return ret;
593
594 return wait_on_sem(&sem);
595}
596
597static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
598{
599 struct iommu_cmd cmd;
600
601 build_inv_dte(&cmd, devid);
602
603 return iommu_queue_command(iommu, &cmd);
604}
605
606static void iommu_flush_dte_all(struct amd_iommu *iommu)
607{
608 u32 devid;
609
610 for (devid = 0; devid <= 0xffff; ++devid)
611 iommu_flush_dte(iommu, devid);
612
613 iommu_completion_wait(iommu);
614}
615
616/*
617 * This function uses heavy locking and may disable irqs for some time. But
618 * this is no issue because it is only called during resume.
619 */
620static void iommu_flush_tlb_all(struct amd_iommu *iommu)
621{
622 u32 dom_id;
623
624 for (dom_id = 0; dom_id <= 0xffff; ++dom_id) {
625 struct iommu_cmd cmd;
626 build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
627 dom_id, 1);
628 iommu_queue_command(iommu, &cmd);
629 }
630
631 iommu_completion_wait(iommu);
632}
633
634static void iommu_flush_all(struct amd_iommu *iommu)
635{
636 struct iommu_cmd cmd;
637
638 build_inv_all(&cmd);
639
640 iommu_queue_command(iommu, &cmd);
641 iommu_completion_wait(iommu);
642}
643
644void iommu_flush_all_caches(struct amd_iommu *iommu)
645{
646 if (iommu_feature(iommu, FEATURE_IA)) {
647 iommu_flush_all(iommu);
648 } else {
649 iommu_flush_dte_all(iommu);
650 iommu_flush_tlb_all(iommu);
651 }
652}
653
654/*
655 * Command send function for flushing on-device TLB
656 */
657static int device_flush_iotlb(struct device *dev, u64 address, size_t size)
658{
659 struct pci_dev *pdev = to_pci_dev(dev);
660 struct amd_iommu *iommu;
661 struct iommu_cmd cmd;
662 u16 devid;
663 int qdep;
664
665 qdep = pci_ats_queue_depth(pdev);
666 devid = get_device_id(dev);
667 iommu = amd_iommu_rlookup_table[devid];
668
669 build_inv_iotlb_pages(&cmd, devid, qdep, address, size);
670
671 return iommu_queue_command(iommu, &cmd);
672}
673
674/*
675 * Command send function for invalidating a device table entry
676 */
677static int device_flush_dte(struct device *dev)
678{
679 struct amd_iommu *iommu;
680 struct pci_dev *pdev;
681 u16 devid;
682 int ret;
683
684 pdev = to_pci_dev(dev);
685 devid = get_device_id(dev);
686 iommu = amd_iommu_rlookup_table[devid];
687
688 ret = iommu_flush_dte(iommu, devid);
689 if (ret)
690 return ret;
691
692 if (pci_ats_enabled(pdev))
693 ret = device_flush_iotlb(dev, 0, ~0UL);
694
695 return ret;
696}
697
698/*
699 * TLB invalidation function which is called from the mapping functions.
700 * It invalidates a single PTE if the range to flush is within a single
701 * page. Otherwise it flushes the whole TLB of the IOMMU.
702 */
703static void __domain_flush_pages(struct protection_domain *domain,
704 u64 address, size_t size, int pde)
705{
706 struct iommu_dev_data *dev_data;
707 struct iommu_cmd cmd;
708 int ret = 0, i;
709
710 build_inv_iommu_pages(&cmd, address, size, domain->id, pde);
711
712 for (i = 0; i < amd_iommus_present; ++i) {
713 if (!domain->dev_iommu[i])
714 continue;
715
716 /*
717 * Devices of this domain are behind this IOMMU
718 * We need a TLB flush
719 */
720 ret |= iommu_queue_command(amd_iommus[i], &cmd);
721 }
722
723 list_for_each_entry(dev_data, &domain->dev_list, list) {
724 struct pci_dev *pdev = to_pci_dev(dev_data->dev);
725
726 if (!pci_ats_enabled(pdev))
727 continue;
728
729 ret |= device_flush_iotlb(dev_data->dev, address, size);
730 }
731
732 WARN_ON(ret);
733}
734
735static void domain_flush_pages(struct protection_domain *domain,
736 u64 address, size_t size)
737{
738 __domain_flush_pages(domain, address, size, 0);
739}
740
741/* Flush the whole IO/TLB for a given protection domain */
742static void domain_flush_tlb(struct protection_domain *domain)
743{
744 __domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0);
745}
746
747/* Flush the whole IO/TLB for a given protection domain - including PDE */
748static void domain_flush_tlb_pde(struct protection_domain *domain)
749{
750 __domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
751}
752
753static void domain_flush_complete(struct protection_domain *domain)
754{
755 int i;
756
757 for (i = 0; i < amd_iommus_present; ++i) {
758 if (!domain->dev_iommu[i])
759 continue;
760
761 /*
762 * Devices of this domain are behind this IOMMU
763 * We need to wait for completion of all commands.
764 */
765 iommu_completion_wait(amd_iommus[i]);
766 }
767}
768
769
770/*
771 * This function flushes the DTEs for all devices in domain
772 */
773static void domain_flush_devices(struct protection_domain *domain)
774{
775 struct iommu_dev_data *dev_data;
776 unsigned long flags;
777
778 spin_lock_irqsave(&domain->lock, flags);
779
780 list_for_each_entry(dev_data, &domain->dev_list, list)
781 device_flush_dte(dev_data->dev);
782
783 spin_unlock_irqrestore(&domain->lock, flags);
784}
785
786/****************************************************************************
787 *
788 * The functions below are used the create the page table mappings for
789 * unity mapped regions.
790 *
791 ****************************************************************************/
792
793/*
794 * This function is used to add another level to an IO page table. Adding
795 * another level increases the size of the address space by 9 bits to a size up
796 * to 64 bits.
797 */
798static bool increase_address_space(struct protection_domain *domain,
799 gfp_t gfp)
800{
801 u64 *pte;
802
803 if (domain->mode == PAGE_MODE_6_LEVEL)
804 /* address space already 64 bit large */
805 return false;
806
807 pte = (void *)get_zeroed_page(gfp);
808 if (!pte)
809 return false;
810
811 *pte = PM_LEVEL_PDE(domain->mode,
812 virt_to_phys(domain->pt_root));
813 domain->pt_root = pte;
814 domain->mode += 1;
815 domain->updated = true;
816
817 return true;
818}
819
820static u64 *alloc_pte(struct protection_domain *domain,
821 unsigned long address,
822 unsigned long page_size,
823 u64 **pte_page,
824 gfp_t gfp)
825{
826 int level, end_lvl;
827 u64 *pte, *page;
828
829 BUG_ON(!is_power_of_2(page_size));
830
831 while (address > PM_LEVEL_SIZE(domain->mode))
832 increase_address_space(domain, gfp);
833
834 level = domain->mode - 1;
835 pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
836 address = PAGE_SIZE_ALIGN(address, page_size);
837 end_lvl = PAGE_SIZE_LEVEL(page_size);
838
839 while (level > end_lvl) {
840 if (!IOMMU_PTE_PRESENT(*pte)) {
841 page = (u64 *)get_zeroed_page(gfp);
842 if (!page)
843 return NULL;
844 *pte = PM_LEVEL_PDE(level, virt_to_phys(page));
845 }
846
847 /* No level skipping support yet */
848 if (PM_PTE_LEVEL(*pte) != level)
849 return NULL;
850
851 level -= 1;
852
853 pte = IOMMU_PTE_PAGE(*pte);
854
855 if (pte_page && level == end_lvl)
856 *pte_page = pte;
857
858 pte = &pte[PM_LEVEL_INDEX(level, address)];
859 }
860
861 return pte;
862}
863
864/*
865 * This function checks if there is a PTE for a given dma address. If
866 * there is one, it returns the pointer to it.
867 */
868static u64 *fetch_pte(struct protection_domain *domain, unsigned long address)
869{
870 int level;
871 u64 *pte;
872
873 if (address > PM_LEVEL_SIZE(domain->mode))
874 return NULL;
875
876 level = domain->mode - 1;
877 pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
878
879 while (level > 0) {
880
881 /* Not Present */
882 if (!IOMMU_PTE_PRESENT(*pte))
883 return NULL;
884
885 /* Large PTE */
886 if (PM_PTE_LEVEL(*pte) == 0x07) {
887 unsigned long pte_mask, __pte;
888
889 /*
890 * If we have a series of large PTEs, make
891 * sure to return a pointer to the first one.
892 */
893 pte_mask = PTE_PAGE_SIZE(*pte);
894 pte_mask = ~((PAGE_SIZE_PTE_COUNT(pte_mask) << 3) - 1);
895 __pte = ((unsigned long)pte) & pte_mask;
896
897 return (u64 *)__pte;
898 }
899
900 /* No level skipping support yet */
901 if (PM_PTE_LEVEL(*pte) != level)
902 return NULL;
903
904 level -= 1;
905
906 /* Walk to the next level */
907 pte = IOMMU_PTE_PAGE(*pte);
908 pte = &pte[PM_LEVEL_INDEX(level, address)];
909 }
910
911 return pte;
912}
913
914/*
915 * Generic mapping functions. It maps a physical address into a DMA
916 * address space. It allocates the page table pages if necessary.
917 * In the future it can be extended to a generic mapping function
918 * supporting all features of AMD IOMMU page tables like level skipping
919 * and full 64 bit address spaces.
920 */
921static int iommu_map_page(struct protection_domain *dom,
922 unsigned long bus_addr,
923 unsigned long phys_addr,
924 int prot,
925 unsigned long page_size)
926{
927 u64 __pte, *pte;
928 int i, count;
929
930 if (!(prot & IOMMU_PROT_MASK))
931 return -EINVAL;
932
933 bus_addr = PAGE_ALIGN(bus_addr);
934 phys_addr = PAGE_ALIGN(phys_addr);
935 count = PAGE_SIZE_PTE_COUNT(page_size);
936 pte = alloc_pte(dom, bus_addr, page_size, NULL, GFP_KERNEL);
937
938 for (i = 0; i < count; ++i)
939 if (IOMMU_PTE_PRESENT(pte[i]))
940 return -EBUSY;
941
942 if (page_size > PAGE_SIZE) {
943 __pte = PAGE_SIZE_PTE(phys_addr, page_size);
944 __pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_P | IOMMU_PTE_FC;
945 } else
946 __pte = phys_addr | IOMMU_PTE_P | IOMMU_PTE_FC;
947
948 if (prot & IOMMU_PROT_IR)
949 __pte |= IOMMU_PTE_IR;
950 if (prot & IOMMU_PROT_IW)
951 __pte |= IOMMU_PTE_IW;
952
953 for (i = 0; i < count; ++i)
954 pte[i] = __pte;
955
956 update_domain(dom);
957
958 return 0;
959}
960
961static unsigned long iommu_unmap_page(struct protection_domain *dom,
962 unsigned long bus_addr,
963 unsigned long page_size)
964{
965 unsigned long long unmap_size, unmapped;
966 u64 *pte;
967
968 BUG_ON(!is_power_of_2(page_size));
969
970 unmapped = 0;
971
972 while (unmapped < page_size) {
973
974 pte = fetch_pte(dom, bus_addr);
975
976 if (!pte) {
977 /*
978 * No PTE for this address
979 * move forward in 4kb steps
980 */
981 unmap_size = PAGE_SIZE;
982 } else if (PM_PTE_LEVEL(*pte) == 0) {
983 /* 4kb PTE found for this address */
984 unmap_size = PAGE_SIZE;
985 *pte = 0ULL;
986 } else {
987 int count, i;
988
989 /* Large PTE found which maps this address */
990 unmap_size = PTE_PAGE_SIZE(*pte);
991 count = PAGE_SIZE_PTE_COUNT(unmap_size);
992 for (i = 0; i < count; i++)
993 pte[i] = 0ULL;
994 }
995
996 bus_addr = (bus_addr & ~(unmap_size - 1)) + unmap_size;
997 unmapped += unmap_size;
998 }
999
1000 BUG_ON(!is_power_of_2(unmapped));
1001
1002 return unmapped;
1003}
1004
1005/*
1006 * This function checks if a specific unity mapping entry is needed for
1007 * this specific IOMMU.
1008 */
1009static int iommu_for_unity_map(struct amd_iommu *iommu,
1010 struct unity_map_entry *entry)
1011{
1012 u16 bdf, i;
1013
1014 for (i = entry->devid_start; i <= entry->devid_end; ++i) {
1015 bdf = amd_iommu_alias_table[i];
1016 if (amd_iommu_rlookup_table[bdf] == iommu)
1017 return 1;
1018 }
1019
1020 return 0;
1021}
1022
1023/*
1024 * This function actually applies the mapping to the page table of the
1025 * dma_ops domain.
1026 */
1027static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
1028 struct unity_map_entry *e)
1029{
1030 u64 addr;
1031 int ret;
1032
1033 for (addr = e->address_start; addr < e->address_end;
1034 addr += PAGE_SIZE) {
1035 ret = iommu_map_page(&dma_dom->domain, addr, addr, e->prot,
1036 PAGE_SIZE);
1037 if (ret)
1038 return ret;
1039 /*
1040 * if unity mapping is in aperture range mark the page
1041 * as allocated in the aperture
1042 */
1043 if (addr < dma_dom->aperture_size)
1044 __set_bit(addr >> PAGE_SHIFT,
1045 dma_dom->aperture[0]->bitmap);
1046 }
1047
1048 return 0;
1049}
1050
1051/*
1052 * Init the unity mappings for a specific IOMMU in the system
1053 *
1054 * Basically iterates over all unity mapping entries and applies them to
1055 * the default domain DMA of that IOMMU if necessary.
1056 */
1057static int iommu_init_unity_mappings(struct amd_iommu *iommu)
1058{
1059 struct unity_map_entry *entry;
1060 int ret;
1061
1062 list_for_each_entry(entry, &amd_iommu_unity_map, list) {
1063 if (!iommu_for_unity_map(iommu, entry))
1064 continue;
1065 ret = dma_ops_unity_map(iommu->default_dom, entry);
1066 if (ret)
1067 return ret;
1068 }
1069
1070 return 0;
1071}
1072
1073/*
1074 * Inits the unity mappings required for a specific device
1075 */
1076static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom,
1077 u16 devid)
1078{
1079 struct unity_map_entry *e;
1080 int ret;
1081
1082 list_for_each_entry(e, &amd_iommu_unity_map, list) {
1083 if (!(devid >= e->devid_start && devid <= e->devid_end))
1084 continue;
1085 ret = dma_ops_unity_map(dma_dom, e);
1086 if (ret)
1087 return ret;
1088 }
1089
1090 return 0;
1091}
1092
/****************************************************************************
 *
 * The next functions belong to the address allocator for the dma_ops
 * interface functions. They work like the allocators in the other IOMMU
 * drivers. It's basically a bitmap which marks the allocated pages in
 * the aperture. Maybe it could be enhanced in the future to a more
 * efficient allocator.
 *
 ****************************************************************************/
1102
1103/*
1104 * The address allocator core functions.
1105 *
1106 * called with domain->lock held
1107 */
1108
1109/*
1110 * Used to reserve address ranges in the aperture (e.g. for exclusion
1111 * ranges.
1112 */
1113static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
1114 unsigned long start_page,
1115 unsigned int pages)
1116{
1117 unsigned int i, last_page = dom->aperture_size >> PAGE_SHIFT;
1118
1119 if (start_page + pages > last_page)
1120 pages = last_page - start_page;
1121
1122 for (i = start_page; i < start_page + pages; ++i) {
1123 int index = i / APERTURE_RANGE_PAGES;
1124 int page = i % APERTURE_RANGE_PAGES;
1125 __set_bit(page, dom->aperture[index]->bitmap);
1126 }
1127}
1128
1129/*
1130 * This function is used to add a new aperture range to an existing
1131 * aperture in case of dma_ops domain allocation or address allocation
1132 * failure.
1133 */
1134static int alloc_new_range(struct dma_ops_domain *dma_dom,
1135 bool populate, gfp_t gfp)
1136{
1137 int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT;
1138 struct amd_iommu *iommu;
1139 unsigned long i;
1140
1141#ifdef CONFIG_IOMMU_STRESS
1142 populate = false;
1143#endif
1144
1145 if (index >= APERTURE_MAX_RANGES)
1146 return -ENOMEM;
1147
1148 dma_dom->aperture[index] = kzalloc(sizeof(struct aperture_range), gfp);
1149 if (!dma_dom->aperture[index])
1150 return -ENOMEM;
1151
1152 dma_dom->aperture[index]->bitmap = (void *)get_zeroed_page(gfp);
1153 if (!dma_dom->aperture[index]->bitmap)
1154 goto out_free;
1155
1156 dma_dom->aperture[index]->offset = dma_dom->aperture_size;
1157
1158 if (populate) {
1159 unsigned long address = dma_dom->aperture_size;
1160 int i, num_ptes = APERTURE_RANGE_PAGES / 512;
1161 u64 *pte, *pte_page;
1162
1163 for (i = 0; i < num_ptes; ++i) {
1164 pte = alloc_pte(&dma_dom->domain, address, PAGE_SIZE,
1165 &pte_page, gfp);
1166 if (!pte)
1167 goto out_free;
1168
1169 dma_dom->aperture[index]->pte_pages[i] = pte_page;
1170
1171 address += APERTURE_RANGE_SIZE / 64;
1172 }
1173 }
1174
1175 dma_dom->aperture_size += APERTURE_RANGE_SIZE;
1176
1177 /* Initialize the exclusion range if necessary */
1178 for_each_iommu(iommu) {
1179 if (iommu->exclusion_start &&
1180 iommu->exclusion_start >= dma_dom->aperture[index]->offset
1181 && iommu->exclusion_start < dma_dom->aperture_size) {
1182 unsigned long startpage;
1183 int pages = iommu_num_pages(iommu->exclusion_start,
1184 iommu->exclusion_length,
1185 PAGE_SIZE);
1186 startpage = iommu->exclusion_start >> PAGE_SHIFT;
1187 dma_ops_reserve_addresses(dma_dom, startpage, pages);
1188 }
1189 }
1190
1191 /*
1192 * Check for areas already mapped as present in the new aperture
1193 * range and mark those pages as reserved in the allocator. Such
1194 * mappings may already exist as a result of requested unity
1195 * mappings for devices.
1196 */
1197 for (i = dma_dom->aperture[index]->offset;
1198 i < dma_dom->aperture_size;
1199 i += PAGE_SIZE) {
1200 u64 *pte = fetch_pte(&dma_dom->domain, i);
1201 if (!pte || !IOMMU_PTE_PRESENT(*pte))
1202 continue;
1203
1204 dma_ops_reserve_addresses(dma_dom, i << PAGE_SHIFT, 1);
1205 }
1206
1207 update_domain(&dma_dom->domain);
1208
1209 return 0;
1210
1211out_free:
1212 update_domain(&dma_dom->domain);
1213
1214 free_page((unsigned long)dma_dom->aperture[index]->bitmap);
1215
1216 kfree(dma_dom->aperture[index]);
1217 dma_dom->aperture[index] = NULL;
1218
1219 return -ENOMEM;
1220}
1221
/*
 * Scan the aperture ranges starting at @start for a free block of
 * @pages pages that satisfies @align_mask and @dma_mask.  Returns the
 * DMA address, or -1 (as unsigned long) on failure.
 * Called with domain->lock held.
 */
static unsigned long dma_ops_area_alloc(struct device *dev,
					struct dma_ops_domain *dom,
					unsigned int pages,
					unsigned long align_mask,
					u64 dma_mask,
					unsigned long start)
{
	/* byte offset inside the current range where scanning resumes */
	unsigned long next_bit = dom->next_address % APERTURE_RANGE_SIZE;
	int max_index = dom->aperture_size >> APERTURE_RANGE_SHIFT;
	int i = start >> APERTURE_RANGE_SHIFT;
	unsigned long boundary_size;
	unsigned long address = -1;
	unsigned long limit;

	/* convert byte offset to a bit (page) index */
	next_bit >>= PAGE_SHIFT;

	/* allocations must not cross the device's segment boundary */
	boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
			PAGE_SIZE) >> PAGE_SHIFT;

	for (;i < max_index; ++i) {
		unsigned long offset = dom->aperture[i]->offset >> PAGE_SHIFT;

		/* ranges above the DMA mask can never satisfy the request */
		if (dom->aperture[i]->offset >= dma_mask)
			break;

		limit = iommu_device_max_index(APERTURE_RANGE_PAGES, offset,
					       dma_mask >> PAGE_SHIFT);

		address = iommu_area_alloc(dom->aperture[i]->bitmap,
					   limit, next_bit, pages, 0,
					   boundary_size, align_mask);
		if (address != -1) {
			/* translate the bit index back into a DMA address */
			address = dom->aperture[i]->offset +
				  (address << PAGE_SHIFT);
			dom->next_address = address + (pages << PAGE_SHIFT);
			break;
		}

		/* subsequent ranges are scanned from their beginning */
		next_bit = 0;
	}

	return address;
}
1265
/*
 * Allocate a DMA address range of @pages pages.  First searches from
 * the last allocation position onwards, then wraps around and retries
 * from the aperture start.  Returns DMA_ERROR_CODE on failure.
 * Called with domain->lock held.
 */
static unsigned long dma_ops_alloc_addresses(struct device *dev,
					     struct dma_ops_domain *dom,
					     unsigned int pages,
					     unsigned long align_mask,
					     u64 dma_mask)
{
	unsigned long address;

#ifdef CONFIG_IOMMU_STRESS
	dom->next_address = 0;
	dom->need_flush = true;
#endif

	address = dma_ops_area_alloc(dev, dom, pages, align_mask,
				     dma_mask, dom->next_address);

	if (address == -1) {
		/* wrap around: rescan from the beginning of the aperture */
		dom->next_address = 0;
		address = dma_ops_area_alloc(dev, dom, pages, align_mask,
					     dma_mask, 0);
		/* addresses may be reused now - an IO/TLB flush is needed */
		dom->need_flush = true;
	}

	if (unlikely(address == -1))
		address = DMA_ERROR_CODE;

	WARN_ON((address + (PAGE_SIZE*pages)) > dom->aperture_size);

	return address;
}
1296
1297/*
1298 * The address free function.
1299 *
1300 * called with domain->lock held
1301 */
1302static void dma_ops_free_addresses(struct dma_ops_domain *dom,
1303 unsigned long address,
1304 unsigned int pages)
1305{
1306 unsigned i = address >> APERTURE_RANGE_SHIFT;
1307 struct aperture_range *range = dom->aperture[i];
1308
1309 BUG_ON(i >= APERTURE_MAX_RANGES || range == NULL);
1310
1311#ifdef CONFIG_IOMMU_STRESS
1312 if (i < 4)
1313 return;
1314#endif
1315
1316 if (address >= dom->next_address)
1317 dom->need_flush = true;
1318
1319 address = (address % APERTURE_RANGE_SIZE) >> PAGE_SHIFT;
1320
1321 bitmap_clear(range->bitmap, address, pages);
1322
1323}
1324
/****************************************************************************
 *
 * The next functions belong to the domain allocation. A domain is
 * allocated for every IOMMU as the default domain. If device isolation
 * is enabled, every device gets its own domain. The most important thing
 * about domains is the page table mapping the DMA address space they
 * contain.
 *
 ****************************************************************************/
1334
1335/*
1336 * This function adds a protection domain to the global protection domain list
1337 */
1338static void add_domain_to_list(struct protection_domain *domain)
1339{
1340 unsigned long flags;
1341
1342 spin_lock_irqsave(&amd_iommu_pd_lock, flags);
1343 list_add(&domain->list, &amd_iommu_pd_list);
1344 spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
1345}
1346
1347/*
1348 * This function removes a protection domain to the global
1349 * protection domain list
1350 */
1351static void del_domain_from_list(struct protection_domain *domain)
1352{
1353 unsigned long flags;
1354
1355 spin_lock_irqsave(&amd_iommu_pd_lock, flags);
1356 list_del(&domain->list);
1357 spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
1358}
1359
1360static u16 domain_id_alloc(void)
1361{
1362 unsigned long flags;
1363 int id;
1364
1365 write_lock_irqsave(&amd_iommu_devtable_lock, flags);
1366 id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
1367 BUG_ON(id == 0);
1368 if (id > 0 && id < MAX_DOMAIN_ID)
1369 __set_bit(id, amd_iommu_pd_alloc_bitmap);
1370 else
1371 id = 0;
1372 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
1373
1374 return id;
1375}
1376
1377static void domain_id_free(int id)
1378{
1379 unsigned long flags;
1380
1381 write_lock_irqsave(&amd_iommu_devtable_lock, flags);
1382 if (id > 0 && id < MAX_DOMAIN_ID)
1383 __clear_bit(id, amd_iommu_pd_alloc_bitmap);
1384 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
1385}
1386
1387static void free_pagetable(struct protection_domain *domain)
1388{
1389 int i, j;
1390 u64 *p1, *p2, *p3;
1391
1392 p1 = domain->pt_root;
1393
1394 if (!p1)
1395 return;
1396
1397 for (i = 0; i < 512; ++i) {
1398 if (!IOMMU_PTE_PRESENT(p1[i]))
1399 continue;
1400
1401 p2 = IOMMU_PTE_PAGE(p1[i]);
1402 for (j = 0; j < 512; ++j) {
1403 if (!IOMMU_PTE_PRESENT(p2[j]))
1404 continue;
1405 p3 = IOMMU_PTE_PAGE(p2[j]);
1406 free_page((unsigned long)p3);
1407 }
1408
1409 free_page((unsigned long)p2);
1410 }
1411
1412 free_page((unsigned long)p1);
1413
1414 domain->pt_root = NULL;
1415}
1416
1417/*
1418 * Free a domain, only used if something went wrong in the
1419 * allocation path and we need to free an already allocated page table
1420 */
1421static void dma_ops_domain_free(struct dma_ops_domain *dom)
1422{
1423 int i;
1424
1425 if (!dom)
1426 return;
1427
1428 del_domain_from_list(&dom->domain);
1429
1430 free_pagetable(&dom->domain);
1431
1432 for (i = 0; i < APERTURE_MAX_RANGES; ++i) {
1433 if (!dom->aperture[i])
1434 continue;
1435 free_page((unsigned long)dom->aperture[i]->bitmap);
1436 kfree(dom->aperture[i]);
1437 }
1438
1439 kfree(dom);
1440}
1441
/*
 * Allocates a new protection domain usable for the dma_ops functions.
 * It also initializes the page table and the address allocator data
 * structures required for the dma_ops interface
 */
static struct dma_ops_domain *dma_ops_domain_alloc(void)
{
	struct dma_ops_domain *dma_dom;

	dma_dom = kzalloc(sizeof(struct dma_ops_domain), GFP_KERNEL);
	if (!dma_dom)
		return NULL;

	spin_lock_init(&dma_dom->domain.lock);

	/* id 0 signals allocation failure (see domain_id_alloc()) */
	dma_dom->domain.id = domain_id_alloc();
	if (dma_dom->domain.id == 0)
		goto free_dma_dom;
	INIT_LIST_HEAD(&dma_dom->domain.dev_list);
	dma_dom->domain.mode = PAGE_MODE_2_LEVEL;
	dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL);
	dma_dom->domain.flags = PD_DMA_OPS_MASK;
	dma_dom->domain.priv = dma_dom;
	if (!dma_dom->domain.pt_root)
		goto free_dma_dom;

	dma_dom->need_flush = false;
	/* 0xffff: not dedicated to one specific device yet */
	dma_dom->target_dev = 0xffff;

	add_domain_to_list(&dma_dom->domain);

	/* set up the first aperture range including its page tables */
	if (alloc_new_range(dma_dom, true, GFP_KERNEL))
		goto free_dma_dom;

	/*
	 * mark the first page as allocated so we never return 0 as
	 * a valid dma-address. So we can use 0 as error value
	 */
	dma_dom->aperture[0]->bitmap[0] = 1;
	dma_dom->next_address = 0;


	return dma_dom;

free_dma_dom:
	/* dma_ops_domain_free() copes with partially initialized domains */
	dma_ops_domain_free(dma_dom);

	return NULL;
}
1491
1492/*
1493 * little helper function to check whether a given protection domain is a
1494 * dma_ops domain
1495 */
1496static bool dma_ops_domain(struct protection_domain *domain)
1497{
1498 return domain->flags & PD_DMA_OPS_MASK;
1499}
1500
/*
 * Write the device table entry (DTE) for @devid so the IOMMU translates
 * DMA from this device through @domain's page table.
 */
static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats)
{
	u64 pte_root = virt_to_phys(domain->pt_root);
	u32 flags = 0;

	/* encode the paging mode and mark the entry valid/translating */
	pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
		    << DEV_ENTRY_MODE_SHIFT;
	pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;

	/* let the device cache translations if ATS is enabled on it */
	if (ats)
		flags |= DTE_FLAG_IOTLB;

	amd_iommu_dev_table[devid].data[3] |= flags;
	amd_iommu_dev_table[devid].data[2] = domain->id;
	amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root);
	amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root);
}
1518
/*
 * Reset the device table entry for @devid to an empty but still valid
 * state, so the device is no longer translated through any domain.
 */
static void clear_dte_entry(u16 devid)
{
	/* remove entry from the device table seen by the hardware */
	amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV;
	amd_iommu_dev_table[devid].data[1] = 0;
	amd_iommu_dev_table[devid].data[2] = 0;

	/* re-apply the workaround for IOMMU erratum 63 */
	amd_iommu_apply_erratum_63(devid);
}
1528
/*
 * Attach @dev to @domain: update the software bookkeeping, write the
 * device table entry and flush it to the IOMMU.
 * Called with the domain lock held (see __attach_device()).
 */
static void do_attach(struct device *dev, struct protection_domain *domain)
{
	struct iommu_dev_data *dev_data;
	struct amd_iommu *iommu;
	struct pci_dev *pdev;
	bool ats = false;
	u16 devid;

	devid = get_device_id(dev);
	iommu = amd_iommu_rlookup_table[devid];
	dev_data = get_dev_data(dev);
	pdev = to_pci_dev(dev);

	/* only set the IOTLB flag if the device actually has ATS enabled */
	if (amd_iommu_iotlb_sup)
		ats = pci_ats_enabled(pdev);

	/* Update data structures */
	dev_data->domain = domain;
	list_add(&dev_data->list, &domain->dev_list);
	set_dte_entry(devid, domain, ats);

	/* Do reference counting */
	domain->dev_iommu[iommu->index] += 1;
	domain->dev_cnt += 1;

	/* Flush the DTE entry */
	device_flush_dte(dev);
}
1557
/*
 * Detach @dev from its current domain: undo the bookkeeping done by
 * do_attach(), clear the device table entry and flush it.
 * Called with the domain lock held (see __detach_device()).
 */
static void do_detach(struct device *dev)
{
	struct iommu_dev_data *dev_data;
	struct amd_iommu *iommu;
	u16 devid;

	devid = get_device_id(dev);
	iommu = amd_iommu_rlookup_table[devid];
	dev_data = get_dev_data(dev);

	/* decrease reference counters */
	dev_data->domain->dev_iommu[iommu->index] -= 1;
	dev_data->domain->dev_cnt -= 1;

	/* Update data structures */
	dev_data->domain = NULL;
	list_del(&dev_data->list);
	clear_dte_entry(devid);

	/* Flush the DTE entry */
	device_flush_dte(dev);
}
1580
/*
 * If a device is not yet associated with a domain, this function
 * associates it and makes the association visible to the hardware.
 * Returns 0 on success, -EBUSY if the device or its alias is already
 * bound to a different domain, -EINVAL if the alias has no dev_data.
 */
static int __attach_device(struct device *dev,
			   struct protection_domain *domain)
{
	struct iommu_dev_data *dev_data, *alias_data;
	int ret;

	dev_data = get_dev_data(dev);
	alias_data = get_dev_data(dev_data->alias);

	if (!alias_data)
		return -EINVAL;

	/* lock domain */
	spin_lock(&domain->lock);

	/* Some sanity checks */
	ret = -EBUSY;
	if (alias_data->domain != NULL &&
	    alias_data->domain != domain)
		goto out_unlock;

	if (dev_data->domain != NULL &&
	    dev_data->domain != domain)
		goto out_unlock;

	/* Do real assignment */
	if (dev_data->alias != dev) {
		/* attach the alias device first if it is still unbound */
		alias_data = get_dev_data(dev_data->alias);
		if (alias_data->domain == NULL)
			do_attach(dev_data->alias, domain);

		atomic_inc(&alias_data->bind);
	}

	if (dev_data->domain == NULL)
		do_attach(dev, domain);

	/* count how often the device was bound to this domain */
	atomic_inc(&dev_data->bind);

	ret = 0;

out_unlock:

	/* ready */
	spin_unlock(&domain->lock);

	return ret;
}
1633
/*
 * If a device is not yet associated with a domain, this function
 * associates it and makes the association visible to the hardware.
 * Locked wrapper around __attach_device(); also enables ATS.
 */
static int attach_device(struct device *dev,
			 struct protection_domain *domain)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	unsigned long flags;
	int ret;

	/* NOTE(review): pci_enable_ats() may fail; return value ignored */
	if (amd_iommu_iotlb_sup)
		pci_enable_ats(pdev, PAGE_SHIFT);

	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
	ret = __attach_device(dev, domain);
	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

	/*
	 * We might boot into a crash-kernel here. The crashed kernel
	 * left the caches in the IOMMU dirty. So we have to flush
	 * here to evict all dirty stuff.
	 */
	domain_flush_tlb_pde(domain);

	return ret;
}
1661
/*
 * Removes a device from a protection domain (unlocked)
 */
static void __detach_device(struct device *dev)
{
	struct iommu_dev_data *dev_data = get_dev_data(dev);
	struct iommu_dev_data *alias_data;
	struct protection_domain *domain;
	unsigned long flags;

	BUG_ON(!dev_data->domain);

	domain = dev_data->domain;

	spin_lock_irqsave(&domain->lock, flags);

	/* drop the alias binding first; detach it when its count hits 0 */
	if (dev_data->alias != dev) {
		alias_data = get_dev_data(dev_data->alias);
		if (atomic_dec_and_test(&alias_data->bind))
			do_detach(dev_data->alias);
	}

	if (atomic_dec_and_test(&dev_data->bind))
		do_detach(dev);

	spin_unlock_irqrestore(&domain->lock, flags);

	/*
	 * If we run in passthrough mode the device must be assigned to the
	 * passthrough domain if it is detached from any other domain.
	 * Make sure we can deassign from the pt_domain itself.
	 */
	if (iommu_pass_through &&
	    (dev_data->domain == NULL && domain != pt_domain))
		__attach_device(dev, pt_domain);
}
1698
1699/*
1700 * Removes a device from a protection domain (with devtable_lock held)
1701 */
1702static void detach_device(struct device *dev)
1703{
1704 struct pci_dev *pdev = to_pci_dev(dev);
1705 unsigned long flags;
1706
1707 /* lock device table */
1708 write_lock_irqsave(&amd_iommu_devtable_lock, flags);
1709 __detach_device(dev);
1710 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
1711
1712 if (amd_iommu_iotlb_sup && pci_ats_enabled(pdev))
1713 pci_disable_ats(pdev);
1714}
1715
1716/*
1717 * Find out the protection domain structure for a given PCI device. This
1718 * will give us the pointer to the page table root for example.
1719 */
1720static struct protection_domain *domain_for_device(struct device *dev)
1721{
1722 struct protection_domain *dom;
1723 struct iommu_dev_data *dev_data, *alias_data;
1724 unsigned long flags;
1725 u16 devid;
1726
1727 devid = get_device_id(dev);
1728 dev_data = get_dev_data(dev);
1729 alias_data = get_dev_data(dev_data->alias);
1730 if (!alias_data)
1731 return NULL;
1732
1733 read_lock_irqsave(&amd_iommu_devtable_lock, flags);
1734 dom = dev_data->domain;
1735 if (dom == NULL &&
1736 alias_data->domain != NULL) {
1737 __attach_device(dev, alias_data->domain);
1738 dom = alias_data->domain;
1739 }
1740
1741 read_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
1742
1743 return dom;
1744}
1745
/*
 * Bus notifier callback: keeps the per-device IOMMU state in sync when
 * PCI devices are added, removed or unbound from their driver.
 * Always returns 0 (events are never vetoed).
 */
static int device_change_notifier(struct notifier_block *nb,
				  unsigned long action, void *data)
{
	struct device *dev = data;
	u16 devid;
	struct protection_domain *domain;
	struct dma_ops_domain *dma_domain;
	struct amd_iommu *iommu;
	unsigned long flags;

	if (!check_device(dev))
		return 0;

	devid = get_device_id(dev);
	iommu = amd_iommu_rlookup_table[devid];

	switch (action) {
	case BUS_NOTIFY_UNBOUND_DRIVER:

		domain = domain_for_device(dev);

		if (!domain)
			goto out;
		/* in passthrough mode devices stay in the pt domain */
		if (iommu_pass_through)
			break;
		detach_device(dev);
		break;
	case BUS_NOTIFY_ADD_DEVICE:

		iommu_init_device(dev);

		/* may bind the device to the domain of its alias */
		domain = domain_for_device(dev);

		/* allocate a protection domain if a device is added */
		dma_domain = find_protection_domain(devid);
		if (dma_domain)
			goto out;
		dma_domain = dma_ops_domain_alloc();
		if (!dma_domain)
			goto out;
		dma_domain->target_dev = devid;

		spin_lock_irqsave(&iommu_pd_list_lock, flags);
		list_add_tail(&dma_domain->list, &iommu_pd_list);
		spin_unlock_irqrestore(&iommu_pd_list_lock, flags);

		break;
	case BUS_NOTIFY_DEL_DEVICE:

		iommu_uninit_device(dev);

		/* fall through - a removed device needs no DTE flush */
	default:
		goto out;
	}

	device_flush_dte(dev);
	iommu_completion_wait(iommu);

out:
	return 0;
}
1807
/* Notifier watching PCI bus events to keep per-device state in sync */
static struct notifier_block device_nb = {
	.notifier_call = device_change_notifier,
};
1811
/* Register the bus notifier that tracks PCI device add/remove/unbind */
void amd_iommu_init_notifier(void)
{
	bus_register_notifier(&pci_bus_type, &device_nb);
}
1816
1817/*****************************************************************************
1818 *
1819 * The next functions belong to the dma_ops mapping/unmapping code.
1820 *
1821 *****************************************************************************/
1822
/*
 * In the dma_ops path we only have the struct device. This function
 * finds the corresponding IOMMU, the protection domain and the
 * requestor id for a given device.
 * If the device is not yet associated with a domain this is also done
 * in this function.
 */
static struct protection_domain *get_domain(struct device *dev)
{
	struct protection_domain *domain;
	struct dma_ops_domain *dma_dom;
	u16 devid = get_device_id(dev);

	if (!check_device(dev))
		return ERR_PTR(-EINVAL);

	domain = domain_for_device(dev);
	/* a non-dma_ops domain means the device is owned elsewhere */
	if (domain != NULL && !dma_ops_domain(domain))
		return ERR_PTR(-EBUSY);

	if (domain != NULL)
		return domain;

	/* Device not bound yet - bind it */
	dma_dom = find_protection_domain(devid);
	if (!dma_dom)
		dma_dom = amd_iommu_rlookup_table[devid]->default_dom;
	attach_device(dev, &dma_dom->domain);
	DUMP_printk("Using protection domain %d for device %s\n",
		    dma_dom->domain.id, dev_name(dev));

	return &dma_dom->domain;
}
1856
/*
 * Rewrite the device table entries of all devices attached to @domain.
 * Used after the domain's page table or mode changed.
 */
static void update_device_table(struct protection_domain *domain)
{
	struct iommu_dev_data *dev_data;

	list_for_each_entry(dev_data, &domain->dev_list, list) {
		struct pci_dev *pdev = to_pci_dev(dev_data->dev);
		u16 devid = get_device_id(dev_data->dev);
		set_dte_entry(devid, domain, pci_ats_enabled(pdev));
	}
}
1867
1868static void update_domain(struct protection_domain *domain)
1869{
1870 if (!domain->updated)
1871 return;
1872
1873 update_device_table(domain);
1874
1875 domain_flush_devices(domain);
1876 domain_flush_tlb_pde(domain);
1877
1878 domain->updated = false;
1879}
1880
/*
 * This function fetches the PTE for a given address in the aperture
 */
static u64* dma_ops_get_pte(struct dma_ops_domain *dom,
			    unsigned long address)
{
	struct aperture_range *aperture;
	u64 *pte, *pte_page;

	aperture = dom->aperture[APERTURE_RANGE_INDEX(address)];
	if (!aperture)
		return NULL;

	/* allocate the PTE page on demand if it was not pre-populated */
	pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
	if (!pte) {
		pte = alloc_pte(&dom->domain, address, PAGE_SIZE, &pte_page,
				GFP_ATOMIC);
		aperture->pte_pages[APERTURE_PAGE_INDEX(address)] = pte_page;
	} else
		pte += PM_LEVEL_INDEX(0, address);

	/* flushes caches in case alloc_pte() changed the page table */
	update_domain(&dom->domain);

	return pte;
}
1906
1907/*
1908 * This is the generic map function. It maps one 4kb page at paddr to
1909 * the given address in the DMA address space for the domain.
1910 */
1911static dma_addr_t dma_ops_domain_map(struct dma_ops_domain *dom,
1912 unsigned long address,
1913 phys_addr_t paddr,
1914 int direction)
1915{
1916 u64 *pte, __pte;
1917
1918 WARN_ON(address > dom->aperture_size);
1919
1920 paddr &= PAGE_MASK;
1921
1922 pte = dma_ops_get_pte(dom, address);
1923 if (!pte)
1924 return DMA_ERROR_CODE;
1925
1926 __pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC;
1927
1928 if (direction == DMA_TO_DEVICE)
1929 __pte |= IOMMU_PTE_IR;
1930 else if (direction == DMA_FROM_DEVICE)
1931 __pte |= IOMMU_PTE_IW;
1932 else if (direction == DMA_BIDIRECTIONAL)
1933 __pte |= IOMMU_PTE_IR | IOMMU_PTE_IW;
1934
1935 WARN_ON(*pte);
1936
1937 *pte = __pte;
1938
1939 return (dma_addr_t)address;
1940}
1941
1942/*
1943 * The generic unmapping function for on page in the DMA address space.
1944 */
1945static void dma_ops_domain_unmap(struct dma_ops_domain *dom,
1946 unsigned long address)
1947{
1948 struct aperture_range *aperture;
1949 u64 *pte;
1950
1951 if (address >= dom->aperture_size)
1952 return;
1953
1954 aperture = dom->aperture[APERTURE_RANGE_INDEX(address)];
1955 if (!aperture)
1956 return;
1957
1958 pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
1959 if (!pte)
1960 return;
1961
1962 pte += PM_LEVEL_INDEX(0, address);
1963
1964 WARN_ON(!*pte);
1965
1966 *pte = 0ULL;
1967}
1968
/*
 * This function contains common code for mapping of a physically
 * contiguous memory region into DMA address space. It is used by all
 * mapping functions provided with this IOMMU driver.
 * Must be called with the domain lock held.
 * Returns the DMA address, or DMA_ERROR_CODE on failure.
 */
static dma_addr_t __map_single(struct device *dev,
			       struct dma_ops_domain *dma_dom,
			       phys_addr_t paddr,
			       size_t size,
			       int dir,
			       bool align,
			       u64 dma_mask)
{
	dma_addr_t offset = paddr & ~PAGE_MASK;
	dma_addr_t address, start, ret;
	unsigned int pages;
	unsigned long align_mask = 0;
	int i;

	pages = iommu_num_pages(paddr, size, PAGE_SIZE);
	paddr &= PAGE_MASK;

	INC_STATS_COUNTER(total_map_requests);

	if (pages > 1)
		INC_STATS_COUNTER(cross_page);

	/* align the DMA address to the natural order of the mapping */
	if (align)
		align_mask = (1UL << get_order(size)) - 1;

retry:
	address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask,
					  dma_mask);
	if (unlikely(address == DMA_ERROR_CODE)) {
		/*
		 * setting next_address here will let the address
		 * allocator only scan the new allocated range in the
		 * first run. This is a small optimization.
		 */
		dma_dom->next_address = dma_dom->aperture_size;

		if (alloc_new_range(dma_dom, false, GFP_ATOMIC))
			goto out;

		/*
		 * aperture was successfully enlarged by 128 MB, try
		 * allocation again
		 */
		goto retry;
	}

	start = address;
	for (i = 0; i < pages; ++i) {
		ret = dma_ops_domain_map(dma_dom, start, paddr, dir);
		if (ret == DMA_ERROR_CODE)
			goto out_unmap;

		paddr += PAGE_SIZE;
		start += PAGE_SIZE;
	}
	/* re-add the sub-page offset of the original physical address */
	address += offset;

	ADD_STATS_COUNTER(alloced_io_mem, size);

	if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) {
		domain_flush_tlb(&dma_dom->domain);
		dma_dom->need_flush = false;
	} else if (unlikely(amd_iommu_np_cache))
		domain_flush_pages(&dma_dom->domain, address, size);

out:
	return address;

out_unmap:

	/* roll back the partially created mapping */
	for (--i; i >= 0; --i) {
		start -= PAGE_SIZE;
		dma_ops_domain_unmap(dma_dom, start);
	}

	dma_ops_free_addresses(dma_dom, address, pages);

	return DMA_ERROR_CODE;
}
2054
/*
 * Does the reverse of the __map_single function. Must be called with
 * the domain lock held too
 */
static void __unmap_single(struct dma_ops_domain *dma_dom,
			   dma_addr_t dma_addr,
			   size_t size,
			   int dir)
{
	dma_addr_t flush_addr;
	dma_addr_t i, start;
	unsigned int pages;

	/* ignore error cookies and out-of-aperture addresses */
	if ((dma_addr == DMA_ERROR_CODE) ||
	    (dma_addr + size > dma_dom->aperture_size))
		return;

	flush_addr = dma_addr;
	pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	dma_addr &= PAGE_MASK;
	start = dma_addr;

	for (i = 0; i < pages; ++i) {
		dma_ops_domain_unmap(dma_dom, start);
		start += PAGE_SIZE;
	}

	SUB_STATS_COUNTER(alloced_io_mem, size);

	dma_ops_free_addresses(dma_dom, dma_addr, pages);

	/* flush now unless flushing is deferred until address reuse */
	if (amd_iommu_unmap_flush || dma_dom->need_flush) {
		domain_flush_pages(&dma_dom->domain, flush_addr, size);
		dma_dom->need_flush = false;
	}
}
2091
/*
 * The exported map_single function for dma_ops.
 */
static dma_addr_t map_page(struct device *dev, struct page *page,
			   unsigned long offset, size_t size,
			   enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	unsigned long flags;
	struct protection_domain *domain;
	dma_addr_t addr;
	u64 dma_mask;
	phys_addr_t paddr = page_to_phys(page) + offset;

	INC_STATS_COUNTER(cnt_map_single);

	domain = get_domain(dev);
	/* -EINVAL: device not behind an IOMMU - identity-map it */
	if (PTR_ERR(domain) == -EINVAL)
		return (dma_addr_t)paddr;
	else if (IS_ERR(domain))
		return DMA_ERROR_CODE;

	dma_mask = *dev->dma_mask;

	spin_lock_irqsave(&domain->lock, flags);

	addr = __map_single(dev, domain->priv, paddr, size, dir, false,
			    dma_mask);
	if (addr == DMA_ERROR_CODE)
		goto out;

	/* wait until the IOMMU has processed the queued commands */
	domain_flush_complete(domain);

out:
	spin_unlock_irqrestore(&domain->lock, flags);

	return addr;
}
2130
2131/*
2132 * The exported unmap_single function for dma_ops.
2133 */
2134static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
2135 enum dma_data_direction dir, struct dma_attrs *attrs)
2136{
2137 unsigned long flags;
2138 struct protection_domain *domain;
2139
2140 INC_STATS_COUNTER(cnt_unmap_single);
2141
2142 domain = get_domain(dev);
2143 if (IS_ERR(domain))
2144 return;
2145
2146 spin_lock_irqsave(&domain->lock, flags);
2147
2148 __unmap_single(domain->priv, dma_addr, size, dir);
2149
2150 domain_flush_complete(domain);
2151
2152 spin_unlock_irqrestore(&domain->lock, flags);
2153}
2154
2155/*
2156 * This is a special map_sg function which is used if we should map a
2157 * device which is not handled by an AMD IOMMU in the system.
2158 */
2159static int map_sg_no_iommu(struct device *dev, struct scatterlist *sglist,
2160 int nelems, int dir)
2161{
2162 struct scatterlist *s;
2163 int i;
2164
2165 for_each_sg(sglist, s, nelems, i) {
2166 s->dma_address = (dma_addr_t)sg_phys(s);
2167 s->dma_length = s->length;
2168 }
2169
2170 return nelems;
2171}
2172
/*
 * The exported map_sg function for dma_ops (handles scatter-gather
 * lists).  Returns the number of mapped elements, or 0 on failure.
 */
static int map_sg(struct device *dev, struct scatterlist *sglist,
		  int nelems, enum dma_data_direction dir,
		  struct dma_attrs *attrs)
{
	unsigned long flags;
	struct protection_domain *domain;
	int i;
	struct scatterlist *s;
	phys_addr_t paddr;
	int mapped_elems = 0;
	u64 dma_mask;

	INC_STATS_COUNTER(cnt_map_sg);

	domain = get_domain(dev);
	/* device not behind an IOMMU: fall back to identity mapping */
	if (PTR_ERR(domain) == -EINVAL)
		return map_sg_no_iommu(dev, sglist, nelems, dir);
	else if (IS_ERR(domain))
		return 0;

	dma_mask = *dev->dma_mask;

	spin_lock_irqsave(&domain->lock, flags);

	for_each_sg(sglist, s, nelems, i) {
		paddr = sg_phys(s);

		s->dma_address = __map_single(dev, domain->priv,
					      paddr, s->length, dir, false,
					      dma_mask);

		/*
		 * address 0 is reserved by the allocator (see
		 * dma_ops_domain_alloc()), so 0 safely means failure here
		 */
		if (s->dma_address) {
			s->dma_length = s->length;
			mapped_elems++;
		} else
			goto unmap;
	}

	domain_flush_complete(domain);

out:
	spin_unlock_irqrestore(&domain->lock, flags);

	return mapped_elems;
unmap:
	/* error path: undo everything mapped so far and return 0 */
	for_each_sg(sglist, s, mapped_elems, i) {
		if (s->dma_address)
			__unmap_single(domain->priv, s->dma_address,
				       s->dma_length, dir);
		s->dma_address = s->dma_length = 0;
	}

	mapped_elems = 0;

	goto out;
}
2233
2234/*
2235 * The exported map_sg function for dma_ops (handles scatter-gather
2236 * lists).
2237 */
2238static void unmap_sg(struct device *dev, struct scatterlist *sglist,
2239 int nelems, enum dma_data_direction dir,
2240 struct dma_attrs *attrs)
2241{
2242 unsigned long flags;
2243 struct protection_domain *domain;
2244 struct scatterlist *s;
2245 int i;
2246
2247 INC_STATS_COUNTER(cnt_unmap_sg);
2248
2249 domain = get_domain(dev);
2250 if (IS_ERR(domain))
2251 return;
2252
2253 spin_lock_irqsave(&domain->lock, flags);
2254
2255 for_each_sg(sglist, s, nelems, i) {
2256 __unmap_single(domain->priv, s->dma_address,
2257 s->dma_length, dir);
2258 s->dma_address = s->dma_length = 0;
2259 }
2260
2261 domain_flush_complete(domain);
2262
2263 spin_unlock_irqrestore(&domain->lock, flags);
2264}
2265
2266/*
2267 * The exported alloc_coherent function for dma_ops.
2268 */
2269static void *alloc_coherent(struct device *dev, size_t size,
2270 dma_addr_t *dma_addr, gfp_t flag)
2271{
2272 unsigned long flags;
2273 void *virt_addr;
2274 struct protection_domain *domain;
2275 phys_addr_t paddr;
2276 u64 dma_mask = dev->coherent_dma_mask;
2277
2278 INC_STATS_COUNTER(cnt_alloc_coherent);
2279
2280 domain = get_domain(dev);
2281 if (PTR_ERR(domain) == -EINVAL) {
2282 virt_addr = (void *)__get_free_pages(flag, get_order(size));
2283 *dma_addr = __pa(virt_addr);
2284 return virt_addr;
2285 } else if (IS_ERR(domain))
2286 return NULL;
2287
2288 dma_mask = dev->coherent_dma_mask;
2289 flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
2290 flag |= __GFP_ZERO;
2291
2292 virt_addr = (void *)__get_free_pages(flag, get_order(size));
2293 if (!virt_addr)
2294 return NULL;
2295
2296 paddr = virt_to_phys(virt_addr);
2297
2298 if (!dma_mask)
2299 dma_mask = *dev->dma_mask;
2300
2301 spin_lock_irqsave(&domain->lock, flags);
2302
2303 *dma_addr = __map_single(dev, domain->priv, paddr,
2304 size, DMA_BIDIRECTIONAL, true, dma_mask);
2305
2306 if (*dma_addr == DMA_ERROR_CODE) {
2307 spin_unlock_irqrestore(&domain->lock, flags);
2308 goto out_free;
2309 }
2310
2311 domain_flush_complete(domain);
2312
2313 spin_unlock_irqrestore(&domain->lock, flags);
2314
2315 return virt_addr;
2316
2317out_free:
2318
2319 free_pages((unsigned long)virt_addr, get_order(size));
2320
2321 return NULL;
2322}
2323
2324/*
2325 * The exported free_coherent function for dma_ops.
2326 */
2327static void free_coherent(struct device *dev, size_t size,
2328 void *virt_addr, dma_addr_t dma_addr)
2329{
2330 unsigned long flags;
2331 struct protection_domain *domain;
2332
2333 INC_STATS_COUNTER(cnt_free_coherent);
2334
2335 domain = get_domain(dev);
2336 if (IS_ERR(domain))
2337 goto free_mem;
2338
2339 spin_lock_irqsave(&domain->lock, flags);
2340
2341 __unmap_single(domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
2342
2343 domain_flush_complete(domain);
2344
2345 spin_unlock_irqrestore(&domain->lock, flags);
2346
2347free_mem:
2348 free_pages((unsigned long)virt_addr, get_order(size));
2349}
2350
2351/*
2352 * This function is called by the DMA layer to find out if we can handle a
2353 * particular device. It is part of the dma_ops.
2354 */
2355static int amd_iommu_dma_supported(struct device *dev, u64 mask)
2356{
2357 return check_device(dev);
2358}
2359
2360/*
2361 * The function for pre-allocating protection domains.
2362 *
2363 * If the driver core informs the DMA layer if a driver grabs a device
2364 * we don't need to preallocate the protection domains anymore.
2365 * For now we have to.
2366 */
2367static void prealloc_protection_domains(void)
2368{
2369 struct pci_dev *dev = NULL;
2370 struct dma_ops_domain *dma_dom;
2371 u16 devid;
2372
2373 for_each_pci_dev(dev) {
2374
2375 /* Do we handle this device? */
2376 if (!check_device(&dev->dev))
2377 continue;
2378
2379 /* Is there already any domain for it? */
2380 if (domain_for_device(&dev->dev))
2381 continue;
2382
2383 devid = get_device_id(&dev->dev);
2384
2385 dma_dom = dma_ops_domain_alloc();
2386 if (!dma_dom)
2387 continue;
2388 init_unity_mappings_for_device(dma_dom, devid);
2389 dma_dom->target_dev = devid;
2390
2391 attach_device(&dev->dev, &dma_dom->domain);
2392
2393 list_add_tail(&dma_dom->list, &iommu_pd_list);
2394 }
2395}
2396
2397static struct dma_map_ops amd_iommu_dma_ops = {
2398 .alloc_coherent = alloc_coherent,
2399 .free_coherent = free_coherent,
2400 .map_page = map_page,
2401 .unmap_page = unmap_page,
2402 .map_sg = map_sg,
2403 .unmap_sg = unmap_sg,
2404 .dma_supported = amd_iommu_dma_supported,
2405};
2406
2407static unsigned device_dma_ops_init(void)
2408{
2409 struct pci_dev *pdev = NULL;
2410 unsigned unhandled = 0;
2411
2412 for_each_pci_dev(pdev) {
2413 if (!check_device(&pdev->dev)) {
2414 unhandled += 1;
2415 continue;
2416 }
2417
2418 pdev->dev.archdata.dma_ops = &amd_iommu_dma_ops;
2419 }
2420
2421 return unhandled;
2422}
2423
2424/*
2425 * The function which clues the AMD IOMMU driver into dma_ops.
2426 */
2427
2428void __init amd_iommu_init_api(void)
2429{
2430 register_iommu(&amd_iommu_ops);
2431}
2432
2433int __init amd_iommu_init_dma_ops(void)
2434{
2435 struct amd_iommu *iommu;
2436 int ret, unhandled;
2437
2438 /*
2439 * first allocate a default protection domain for every IOMMU we
2440 * found in the system. Devices not assigned to any other
2441 * protection domain will be assigned to the default one.
2442 */
2443 for_each_iommu(iommu) {
2444 iommu->default_dom = dma_ops_domain_alloc();
2445 if (iommu->default_dom == NULL)
2446 return -ENOMEM;
2447 iommu->default_dom->domain.flags |= PD_DEFAULT_MASK;
2448 ret = iommu_init_unity_mappings(iommu);
2449 if (ret)
2450 goto free_domains;
2451 }
2452
2453 /*
2454 * Pre-allocate the protection domains for each device.
2455 */
2456 prealloc_protection_domains();
2457
2458 iommu_detected = 1;
2459 swiotlb = 0;
2460
2461 /* Make the driver finally visible to the drivers */
2462 unhandled = device_dma_ops_init();
2463 if (unhandled && max_pfn > MAX_DMA32_PFN) {
2464 /* There are unhandled devices - initialize swiotlb for them */
2465 swiotlb = 1;
2466 }
2467
2468 amd_iommu_stats_init();
2469
2470 return 0;
2471
2472free_domains:
2473
2474 for_each_iommu(iommu) {
2475 if (iommu->default_dom)
2476 dma_ops_domain_free(iommu->default_dom);
2477 }
2478
2479 return ret;
2480}
2481
2482/*****************************************************************************
2483 *
2484 * The following functions belong to the exported interface of AMD IOMMU
2485 *
2486 * This interface allows access to lower level functions of the IOMMU
2487 * like protection domain handling and assignement of devices to domains
2488 * which is not possible with the dma_ops interface.
2489 *
2490 *****************************************************************************/
2491
2492static void cleanup_domain(struct protection_domain *domain)
2493{
2494 struct iommu_dev_data *dev_data, *next;
2495 unsigned long flags;
2496
2497 write_lock_irqsave(&amd_iommu_devtable_lock, flags);
2498
2499 list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) {
2500 struct device *dev = dev_data->dev;
2501
2502 __detach_device(dev);
2503 atomic_set(&dev_data->bind, 0);
2504 }
2505
2506 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
2507}
2508
2509static void protection_domain_free(struct protection_domain *domain)
2510{
2511 if (!domain)
2512 return;
2513
2514 del_domain_from_list(domain);
2515
2516 if (domain->id)
2517 domain_id_free(domain->id);
2518
2519 kfree(domain);
2520}
2521
2522static struct protection_domain *protection_domain_alloc(void)
2523{
2524 struct protection_domain *domain;
2525
2526 domain = kzalloc(sizeof(*domain), GFP_KERNEL);
2527 if (!domain)
2528 return NULL;
2529
2530 spin_lock_init(&domain->lock);
2531 mutex_init(&domain->api_lock);
2532 domain->id = domain_id_alloc();
2533 if (!domain->id)
2534 goto out_err;
2535 INIT_LIST_HEAD(&domain->dev_list);
2536
2537 add_domain_to_list(domain);
2538
2539 return domain;
2540
2541out_err:
2542 kfree(domain);
2543
2544 return NULL;
2545}
2546
2547static int amd_iommu_domain_init(struct iommu_domain *dom)
2548{
2549 struct protection_domain *domain;
2550
2551 domain = protection_domain_alloc();
2552 if (!domain)
2553 goto out_free;
2554
2555 domain->mode = PAGE_MODE_3_LEVEL;
2556 domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
2557 if (!domain->pt_root)
2558 goto out_free;
2559
2560 dom->priv = domain;
2561
2562 return 0;
2563
2564out_free:
2565 protection_domain_free(domain);
2566
2567 return -ENOMEM;
2568}
2569
2570static void amd_iommu_domain_destroy(struct iommu_domain *dom)
2571{
2572 struct protection_domain *domain = dom->priv;
2573
2574 if (!domain)
2575 return;
2576
2577 if (domain->dev_cnt > 0)
2578 cleanup_domain(domain);
2579
2580 BUG_ON(domain->dev_cnt != 0);
2581
2582 free_pagetable(domain);
2583
2584 protection_domain_free(domain);
2585
2586 dom->priv = NULL;
2587}
2588
2589static void amd_iommu_detach_device(struct iommu_domain *dom,
2590 struct device *dev)
2591{
2592 struct iommu_dev_data *dev_data = dev->archdata.iommu;
2593 struct amd_iommu *iommu;
2594 u16 devid;
2595
2596 if (!check_device(dev))
2597 return;
2598
2599 devid = get_device_id(dev);
2600
2601 if (dev_data->domain != NULL)
2602 detach_device(dev);
2603
2604 iommu = amd_iommu_rlookup_table[devid];
2605 if (!iommu)
2606 return;
2607
2608 device_flush_dte(dev);
2609 iommu_completion_wait(iommu);
2610}
2611
2612static int amd_iommu_attach_device(struct iommu_domain *dom,
2613 struct device *dev)
2614{
2615 struct protection_domain *domain = dom->priv;
2616 struct iommu_dev_data *dev_data;
2617 struct amd_iommu *iommu;
2618 int ret;
2619 u16 devid;
2620
2621 if (!check_device(dev))
2622 return -EINVAL;
2623
2624 dev_data = dev->archdata.iommu;
2625
2626 devid = get_device_id(dev);
2627
2628 iommu = amd_iommu_rlookup_table[devid];
2629 if (!iommu)
2630 return -EINVAL;
2631
2632 if (dev_data->domain)
2633 detach_device(dev);
2634
2635 ret = attach_device(dev, domain);
2636
2637 iommu_completion_wait(iommu);
2638
2639 return ret;
2640}
2641
2642static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
2643 phys_addr_t paddr, int gfp_order, int iommu_prot)
2644{
2645 unsigned long page_size = 0x1000UL << gfp_order;
2646 struct protection_domain *domain = dom->priv;
2647 int prot = 0;
2648 int ret;
2649
2650 if (iommu_prot & IOMMU_READ)
2651 prot |= IOMMU_PROT_IR;
2652 if (iommu_prot & IOMMU_WRITE)
2653 prot |= IOMMU_PROT_IW;
2654
2655 mutex_lock(&domain->api_lock);
2656 ret = iommu_map_page(domain, iova, paddr, prot, page_size);
2657 mutex_unlock(&domain->api_lock);
2658
2659 return ret;
2660}
2661
2662static int amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
2663 int gfp_order)
2664{
2665 struct protection_domain *domain = dom->priv;
2666 unsigned long page_size, unmap_size;
2667
2668 page_size = 0x1000UL << gfp_order;
2669
2670 mutex_lock(&domain->api_lock);
2671 unmap_size = iommu_unmap_page(domain, iova, page_size);
2672 mutex_unlock(&domain->api_lock);
2673
2674 domain_flush_tlb_pde(domain);
2675
2676 return get_order(unmap_size);
2677}
2678
2679static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
2680 unsigned long iova)
2681{
2682 struct protection_domain *domain = dom->priv;
2683 unsigned long offset_mask;
2684 phys_addr_t paddr;
2685 u64 *pte, __pte;
2686
2687 pte = fetch_pte(domain, iova);
2688
2689 if (!pte || !IOMMU_PTE_PRESENT(*pte))
2690 return 0;
2691
2692 if (PM_PTE_LEVEL(*pte) == 0)
2693 offset_mask = PAGE_SIZE - 1;
2694 else
2695 offset_mask = PTE_PAGE_SIZE(*pte) - 1;
2696
2697 __pte = *pte & PM_ADDR_MASK;
2698 paddr = (__pte & ~offset_mask) | (iova & offset_mask);
2699
2700 return paddr;
2701}
2702
2703static int amd_iommu_domain_has_cap(struct iommu_domain *domain,
2704 unsigned long cap)
2705{
2706 switch (cap) {
2707 case IOMMU_CAP_CACHE_COHERENCY:
2708 return 1;
2709 }
2710
2711 return 0;
2712}
2713
2714static struct iommu_ops amd_iommu_ops = {
2715 .domain_init = amd_iommu_domain_init,
2716 .domain_destroy = amd_iommu_domain_destroy,
2717 .attach_dev = amd_iommu_attach_device,
2718 .detach_dev = amd_iommu_detach_device,
2719 .map = amd_iommu_map,
2720 .unmap = amd_iommu_unmap,
2721 .iova_to_phys = amd_iommu_iova_to_phys,
2722 .domain_has_cap = amd_iommu_domain_has_cap,
2723};
2724
2725/*****************************************************************************
2726 *
2727 * The next functions do a basic initialization of IOMMU for pass through
2728 * mode
2729 *
2730 * In passthrough mode the IOMMU is initialized and enabled but not used for
2731 * DMA-API translation.
2732 *
2733 *****************************************************************************/
2734
2735int __init amd_iommu_init_passthrough(void)
2736{
2737 struct amd_iommu *iommu;
2738 struct pci_dev *dev = NULL;
2739 u16 devid;
2740
2741 /* allocate passthrough domain */
2742 pt_domain = protection_domain_alloc();
2743 if (!pt_domain)
2744 return -ENOMEM;
2745
2746 pt_domain->mode |= PAGE_MODE_NONE;
2747
2748 for_each_pci_dev(dev) {
2749 if (!check_device(&dev->dev))
2750 continue;
2751
2752 devid = get_device_id(&dev->dev);
2753
2754 iommu = amd_iommu_rlookup_table[devid];
2755 if (!iommu)
2756 continue;
2757
2758 attach_device(&dev->dev, pt_domain);
2759 }
2760
2761 pr_info("AMD-Vi: Initialized for Passthrough Mode\n");
2762
2763 return 0;
2764}
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
deleted file mode 100644
index bfc8453bd98d..000000000000
--- a/arch/x86/kernel/amd_iommu_init.c
+++ /dev/null
@@ -1,1572 +0,0 @@
1/*
2 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
3 * Author: Joerg Roedel <joerg.roedel@amd.com>
4 * Leo Duran <leo.duran@amd.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#include <linux/pci.h>
21#include <linux/acpi.h>
22#include <linux/list.h>
23#include <linux/slab.h>
24#include <linux/syscore_ops.h>
25#include <linux/interrupt.h>
26#include <linux/msi.h>
27#include <asm/pci-direct.h>
28#include <asm/amd_iommu_proto.h>
29#include <asm/amd_iommu_types.h>
30#include <asm/amd_iommu.h>
31#include <asm/iommu.h>
32#include <asm/gart.h>
33#include <asm/x86_init.h>
34#include <asm/iommu_table.h>
/*
 * Definitions for the ACPI (IVRS) scanning code
 */
#define IVRS_HEADER_LENGTH		48

/* IVRS sub-table types */
#define ACPI_IVHD_TYPE			0x10
#define ACPI_IVMD_TYPE_ALL		0x20
#define ACPI_IVMD_TYPE			0x21
#define ACPI_IVMD_TYPE_RANGE		0x22

/* IVHD device entry types */
#define IVHD_DEV_ALL			0x01
#define IVHD_DEV_SELECT			0x02
#define IVHD_DEV_SELECT_RANGE_START	0x03
#define IVHD_DEV_RANGE_END		0x04
#define IVHD_DEV_ALIAS			0x42
#define IVHD_DEV_ALIAS_RANGE		0x43
#define IVHD_DEV_EXT_SELECT		0x46
#define IVHD_DEV_EXT_SELECT_RANGE	0x47

/* IVHD header flag masks */
#define IVHD_FLAG_HT_TUN_EN_MASK	0x01
#define IVHD_FLAG_PASSPW_EN_MASK	0x02
#define IVHD_FLAG_RESPASSPW_EN_MASK	0x04
#define IVHD_FLAG_ISOC_EN_MASK		0x08

/* IVMD flags */
#define IVMD_FLAG_EXCL_RANGE		0x08
#define IVMD_FLAG_UNITY_MAP		0x01

/* ACPI device entry flags */
#define ACPI_DEVFLAG_INITPASS		0x01
#define ACPI_DEVFLAG_EXTINT		0x02
#define ACPI_DEVFLAG_NMI		0x04
#define ACPI_DEVFLAG_SYSMGT1		0x10
#define ACPI_DEVFLAG_SYSMGT2		0x20
#define ACPI_DEVFLAG_LINT0		0x40
#define ACPI_DEVFLAG_LINT1		0x80
#define ACPI_DEVFLAG_ATSDIS		0x10000000
71/*
72 * ACPI table definitions
73 *
74 * These data structures are laid over the table to parse the important values
75 * out of it.
76 */
77
78/*
79 * structure describing one IOMMU in the ACPI table. Typically followed by one
80 * or more ivhd_entrys.
81 */
82struct ivhd_header {
83 u8 type;
84 u8 flags;
85 u16 length;
86 u16 devid;
87 u16 cap_ptr;
88 u64 mmio_phys;
89 u16 pci_seg;
90 u16 info;
91 u32 reserved;
92} __attribute__((packed));
93
94/*
95 * A device entry describing which devices a specific IOMMU translates and
96 * which requestor ids they use.
97 */
98struct ivhd_entry {
99 u8 type;
100 u16 devid;
101 u8 flags;
102 u32 ext;
103} __attribute__((packed));
104
105/*
106 * An AMD IOMMU memory definition structure. It defines things like exclusion
107 * ranges for devices and regions that should be unity mapped.
108 */
109struct ivmd_header {
110 u8 type;
111 u8 flags;
112 u16 length;
113 u16 devid;
114 u16 aux;
115 u64 resv;
116 u64 range_start;
117 u64 range_length;
118} __attribute__((packed));
119
120bool amd_iommu_dump;
121
122static int __initdata amd_iommu_detected;
123static bool __initdata amd_iommu_disabled;
124
125u16 amd_iommu_last_bdf; /* largest PCI device id we have
126 to handle */
127LIST_HEAD(amd_iommu_unity_map); /* a list of required unity mappings
128 we find in ACPI */
129bool amd_iommu_unmap_flush; /* if true, flush on every unmap */
130
131LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the
132 system */
133
134/* Array to assign indices to IOMMUs*/
135struct amd_iommu *amd_iommus[MAX_IOMMUS];
136int amd_iommus_present;
137
138/* IOMMUs have a non-present cache? */
139bool amd_iommu_np_cache __read_mostly;
140bool amd_iommu_iotlb_sup __read_mostly = true;
141
142/*
143 * The ACPI table parsing functions set this variable on an error
144 */
145static int __initdata amd_iommu_init_err;
146
147/*
148 * List of protection domains - used during resume
149 */
150LIST_HEAD(amd_iommu_pd_list);
151spinlock_t amd_iommu_pd_lock;
152
153/*
154 * Pointer to the device table which is shared by all AMD IOMMUs
155 * it is indexed by the PCI device id or the HT unit id and contains
156 * information about the domain the device belongs to as well as the
157 * page table root pointer.
158 */
159struct dev_table_entry *amd_iommu_dev_table;
160
161/*
162 * The alias table is a driver specific data structure which contains the
163 * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
164 * More than one device can share the same requestor id.
165 */
166u16 *amd_iommu_alias_table;
167
168/*
169 * The rlookup table is used to find the IOMMU which is responsible
170 * for a specific device. It is also indexed by the PCI device id.
171 */
172struct amd_iommu **amd_iommu_rlookup_table;
173
174/*
175 * AMD IOMMU allows up to 2^16 differend protection domains. This is a bitmap
176 * to know which ones are already in use.
177 */
178unsigned long *amd_iommu_pd_alloc_bitmap;
179
180static u32 dev_table_size; /* size of the device table */
181static u32 alias_table_size; /* size of the alias table */
182static u32 rlookup_table_size; /* size if the rlookup table */
183
184/*
185 * This function flushes all internal caches of
186 * the IOMMU used by this driver.
187 */
188extern void iommu_flush_all_caches(struct amd_iommu *iommu);
189
190static inline void update_last_devid(u16 devid)
191{
192 if (devid > amd_iommu_last_bdf)
193 amd_iommu_last_bdf = devid;
194}
195
196static inline unsigned long tbl_size(int entry_size)
197{
198 unsigned shift = PAGE_SHIFT +
199 get_order(((int)amd_iommu_last_bdf + 1) * entry_size);
200
201 return 1UL << shift;
202}
203
204/* Access to l1 and l2 indexed register spaces */
205
206static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
207{
208 u32 val;
209
210 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
211 pci_read_config_dword(iommu->dev, 0xfc, &val);
212 return val;
213}
214
215static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
216{
217 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
218 pci_write_config_dword(iommu->dev, 0xfc, val);
219 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
220}
221
222static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
223{
224 u32 val;
225
226 pci_write_config_dword(iommu->dev, 0xf0, address);
227 pci_read_config_dword(iommu->dev, 0xf4, &val);
228 return val;
229}
230
231static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
232{
233 pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
234 pci_write_config_dword(iommu->dev, 0xf4, val);
235}
236
237/****************************************************************************
238 *
239 * AMD IOMMU MMIO register space handling functions
240 *
241 * These functions are used to program the IOMMU device registers in
242 * MMIO space required for that driver.
243 *
244 ****************************************************************************/
245
246/*
247 * This function set the exclusion range in the IOMMU. DMA accesses to the
248 * exclusion range are passed through untranslated
249 */
250static void iommu_set_exclusion_range(struct amd_iommu *iommu)
251{
252 u64 start = iommu->exclusion_start & PAGE_MASK;
253 u64 limit = (start + iommu->exclusion_length) & PAGE_MASK;
254 u64 entry;
255
256 if (!iommu->exclusion_start)
257 return;
258
259 entry = start | MMIO_EXCL_ENABLE_MASK;
260 memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
261 &entry, sizeof(entry));
262
263 entry = limit;
264 memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
265 &entry, sizeof(entry));
266}
267
268/* Programs the physical address of the device table into the IOMMU hardware */
269static void __init iommu_set_device_table(struct amd_iommu *iommu)
270{
271 u64 entry;
272
273 BUG_ON(iommu->mmio_base == NULL);
274
275 entry = virt_to_phys(amd_iommu_dev_table);
276 entry |= (dev_table_size >> 12) - 1;
277 memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
278 &entry, sizeof(entry));
279}
280
281/* Generic functions to enable/disable certain features of the IOMMU. */
282static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
283{
284 u32 ctrl;
285
286 ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
287 ctrl |= (1 << bit);
288 writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
289}
290
291static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
292{
293 u32 ctrl;
294
295 ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
296 ctrl &= ~(1 << bit);
297 writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
298}
299
300/* Function to enable the hardware */
301static void iommu_enable(struct amd_iommu *iommu)
302{
303 static const char * const feat_str[] = {
304 "PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
305 "IA", "GA", "HE", "PC", NULL
306 };
307 int i;
308
309 printk(KERN_INFO "AMD-Vi: Enabling IOMMU at %s cap 0x%hx",
310 dev_name(&iommu->dev->dev), iommu->cap_ptr);
311
312 if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
313 printk(KERN_CONT " extended features: ");
314 for (i = 0; feat_str[i]; ++i)
315 if (iommu_feature(iommu, (1ULL << i)))
316 printk(KERN_CONT " %s", feat_str[i]);
317 }
318 printk(KERN_CONT "\n");
319
320 iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
321}
322
323static void iommu_disable(struct amd_iommu *iommu)
324{
325 /* Disable command buffer */
326 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
327
328 /* Disable event logging and event interrupts */
329 iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
330 iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
331
332 /* Disable IOMMU hardware itself */
333 iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
334}
335
336/*
337 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
338 * the system has one.
339 */
340static u8 * __init iommu_map_mmio_space(u64 address)
341{
342 u8 *ret;
343
344 if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu")) {
345 pr_err("AMD-Vi: Can not reserve memory region %llx for mmio\n",
346 address);
347 pr_err("AMD-Vi: This is a BIOS bug. Please contact your hardware vendor\n");
348 return NULL;
349 }
350
351 ret = ioremap_nocache(address, MMIO_REGION_LENGTH);
352 if (ret != NULL)
353 return ret;
354
355 release_mem_region(address, MMIO_REGION_LENGTH);
356
357 return NULL;
358}
359
360static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
361{
362 if (iommu->mmio_base)
363 iounmap(iommu->mmio_base);
364 release_mem_region(iommu->mmio_phys, MMIO_REGION_LENGTH);
365}
366
367/****************************************************************************
368 *
369 * The functions below belong to the first pass of AMD IOMMU ACPI table
370 * parsing. In this pass we try to find out the highest device id this
371 * code has to handle. Upon this information the size of the shared data
372 * structures is determined later.
373 *
374 ****************************************************************************/
375
376/*
377 * This function calculates the length of a given IVHD entry
378 */
379static inline int ivhd_entry_length(u8 *ivhd)
380{
381 return 0x04 << (*ivhd >> 6);
382}
383
384/*
385 * This function reads the last device id the IOMMU has to handle from the PCI
386 * capability header for this IOMMU
387 */
388static int __init find_last_devid_on_pci(int bus, int dev, int fn, int cap_ptr)
389{
390 u32 cap;
391
392 cap = read_pci_config(bus, dev, fn, cap_ptr+MMIO_RANGE_OFFSET);
393 update_last_devid(calc_devid(MMIO_GET_BUS(cap), MMIO_GET_LD(cap)));
394
395 return 0;
396}
397
398/*
399 * After reading the highest device id from the IOMMU PCI capability header
400 * this function looks if there is a higher device id defined in the ACPI table
401 */
402static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
403{
404 u8 *p = (void *)h, *end = (void *)h;
405 struct ivhd_entry *dev;
406
407 p += sizeof(*h);
408 end += h->length;
409
410 find_last_devid_on_pci(PCI_BUS(h->devid),
411 PCI_SLOT(h->devid),
412 PCI_FUNC(h->devid),
413 h->cap_ptr);
414
415 while (p < end) {
416 dev = (struct ivhd_entry *)p;
417 switch (dev->type) {
418 case IVHD_DEV_SELECT:
419 case IVHD_DEV_RANGE_END:
420 case IVHD_DEV_ALIAS:
421 case IVHD_DEV_EXT_SELECT:
422 /* all the above subfield types refer to device ids */
423 update_last_devid(dev->devid);
424 break;
425 default:
426 break;
427 }
428 p += ivhd_entry_length(p);
429 }
430
431 WARN_ON(p != end);
432
433 return 0;
434}
435
436/*
437 * Iterate over all IVHD entries in the ACPI table and find the highest device
438 * id which we need to handle. This is the first of three functions which parse
439 * the ACPI table. So we check the checksum here.
440 */
441static int __init find_last_devid_acpi(struct acpi_table_header *table)
442{
443 int i;
444 u8 checksum = 0, *p = (u8 *)table, *end = (u8 *)table;
445 struct ivhd_header *h;
446
447 /*
448 * Validate checksum here so we don't need to do it when
449 * we actually parse the table
450 */
451 for (i = 0; i < table->length; ++i)
452 checksum += p[i];
453 if (checksum != 0) {
454 /* ACPI table corrupt */
455 amd_iommu_init_err = -ENODEV;
456 return 0;
457 }
458
459 p += IVRS_HEADER_LENGTH;
460
461 end += table->length;
462 while (p < end) {
463 h = (struct ivhd_header *)p;
464 switch (h->type) {
465 case ACPI_IVHD_TYPE:
466 find_last_devid_from_ivhd(h);
467 break;
468 default:
469 break;
470 }
471 p += h->length;
472 }
473 WARN_ON(p != end);
474
475 return 0;
476}
477
478/****************************************************************************
479 *
480 * The following functions belong the the code path which parses the ACPI table
481 * the second time. In this ACPI parsing iteration we allocate IOMMU specific
482 * data structures, initialize the device/alias/rlookup table and also
483 * basically initialize the hardware.
484 *
485 ****************************************************************************/
486
487/*
488 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
489 * write commands to that buffer later and the IOMMU will execute them
490 * asynchronously
491 */
492static u8 * __init alloc_command_buffer(struct amd_iommu *iommu)
493{
494 u8 *cmd_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
495 get_order(CMD_BUFFER_SIZE));
496
497 if (cmd_buf == NULL)
498 return NULL;
499
500 iommu->cmd_buf_size = CMD_BUFFER_SIZE | CMD_BUFFER_UNINITIALIZED;
501
502 return cmd_buf;
503}
504
505/*
506 * This function resets the command buffer if the IOMMU stopped fetching
507 * commands from it.
508 */
509void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
510{
511 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
512
513 writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
514 writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
515
516 iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
517}
518
519/*
520 * This function writes the command buffer address to the hardware and
521 * enables it.
522 */
523static void iommu_enable_command_buffer(struct amd_iommu *iommu)
524{
525 u64 entry;
526
527 BUG_ON(iommu->cmd_buf == NULL);
528
529 entry = (u64)virt_to_phys(iommu->cmd_buf);
530 entry |= MMIO_CMD_SIZE_512;
531
532 memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
533 &entry, sizeof(entry));
534
535 amd_iommu_reset_cmd_buffer(iommu);
536 iommu->cmd_buf_size &= ~(CMD_BUFFER_UNINITIALIZED);
537}
538
539static void __init free_command_buffer(struct amd_iommu *iommu)
540{
541 free_pages((unsigned long)iommu->cmd_buf,
542 get_order(iommu->cmd_buf_size & ~(CMD_BUFFER_UNINITIALIZED)));
543}
544
545/* allocates the memory where the IOMMU will log its events to */
546static u8 * __init alloc_event_buffer(struct amd_iommu *iommu)
547{
548 iommu->evt_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
549 get_order(EVT_BUFFER_SIZE));
550
551 if (iommu->evt_buf == NULL)
552 return NULL;
553
554 iommu->evt_buf_size = EVT_BUFFER_SIZE;
555
556 return iommu->evt_buf;
557}
558
559static void iommu_enable_event_buffer(struct amd_iommu *iommu)
560{
561 u64 entry;
562
563 BUG_ON(iommu->evt_buf == NULL);
564
565 entry = (u64)virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;
566
567 memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
568 &entry, sizeof(entry));
569
570 /* set head and tail to zero manually */
571 writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
572 writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
573
574 iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
575}
576
577static void __init free_event_buffer(struct amd_iommu *iommu)
578{
579 free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
580}
581
582/* sets a specific bit in the device table entry. */
583static void set_dev_entry_bit(u16 devid, u8 bit)
584{
585 int i = (bit >> 5) & 0x07;
586 int _bit = bit & 0x1f;
587
588 amd_iommu_dev_table[devid].data[i] |= (1 << _bit);
589}
590
591static int get_dev_entry_bit(u16 devid, u8 bit)
592{
593 int i = (bit >> 5) & 0x07;
594 int _bit = bit & 0x1f;
595
596 return (amd_iommu_dev_table[devid].data[i] & (1 << _bit)) >> _bit;
597}
598
599
600void amd_iommu_apply_erratum_63(u16 devid)
601{
602 int sysmgt;
603
604 sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
605 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);
606
607 if (sysmgt == 0x01)
608 set_dev_entry_bit(devid, DEV_ENTRY_IW);
609}
610
611/* Writes the specific IOMMU for a device into the rlookup table */
612static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
613{
614 amd_iommu_rlookup_table[devid] = iommu;
615}
616
617/*
618 * This function takes the device specific flags read from the ACPI
619 * table and sets up the device table entry with that information
620 */
621static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
622 u16 devid, u32 flags, u32 ext_flags)
623{
624 if (flags & ACPI_DEVFLAG_INITPASS)
625 set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
626 if (flags & ACPI_DEVFLAG_EXTINT)
627 set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
628 if (flags & ACPI_DEVFLAG_NMI)
629 set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
630 if (flags & ACPI_DEVFLAG_SYSMGT1)
631 set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
632 if (flags & ACPI_DEVFLAG_SYSMGT2)
633 set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
634 if (flags & ACPI_DEVFLAG_LINT0)
635 set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
636 if (flags & ACPI_DEVFLAG_LINT1)
637 set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);
638
639 amd_iommu_apply_erratum_63(devid);
640
641 set_iommu_for_device(iommu, devid);
642}
643
644/*
645 * Reads the device exclusion range from ACPI and initialize IOMMU with
646 * it
647 */
648static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
649{
650 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
651
652 if (!(m->flags & IVMD_FLAG_EXCL_RANGE))
653 return;
654
655 if (iommu) {
656 /*
657 * We only can configure exclusion ranges per IOMMU, not
658 * per device. But we can enable the exclusion range per
659 * device. This is done here
660 */
661 set_dev_entry_bit(m->devid, DEV_ENTRY_EX);
662 iommu->exclusion_start = m->range_start;
663 iommu->exclusion_length = m->range_length;
664 }
665}
666
667/*
668 * This function reads some important data from the IOMMU PCI space and
669 * initializes the driver data structure with it. It reads the hardware
670 * capabilities and the first/last device entries
671 */
/*
 * This function reads some important data from the IOMMU PCI space and
 * initializes the driver data structure with it. It reads the hardware
 * capabilities and the first/last device entries
 */
static void __init init_iommu_from_pci(struct amd_iommu *iommu)
{
	int cap_ptr = iommu->cap_ptr;
	u32 range, misc, low, high;
	int i, j;

	/* Capability header, device range and misc registers from PCI space */
	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
			      &iommu->cap);
	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
			      &range);
	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET,
			      &misc);

	/* First/last PCI device id handled by this IOMMU */
	iommu->first_device = calc_devid(MMIO_GET_BUS(range),
					 MMIO_GET_FD(range));
	iommu->last_device = calc_devid(MMIO_GET_BUS(range),
					MMIO_GET_LD(range));
	iommu->evt_msi_num = MMIO_MSI_NUM(misc);

	/* IOTLB support is global: one IOMMU without it disables it for all */
	if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
		amd_iommu_iotlb_sup = false;

	/* read extended feature bits (64 bit register, read as two dwords) */
	low = readl(iommu->mmio_base + MMIO_EXT_FEATURES);
	high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4);

	iommu->features = ((u64)high << 32) | low;

	if (!is_rd890_iommu(iommu->dev))
		return;

	/*
	 * Some rd890 systems may not be fully reconfigured by the BIOS, so
	 * it's necessary for us to store this information so it can be
	 * reprogrammed on resume
	 */

	pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
			      &iommu->stored_addr_lo);
	pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
			      &iommu->stored_addr_hi);

	/* Low bit locks writes to configuration space */
	iommu->stored_addr_lo &= ~1;

	/* Save the indirect L1 (6 banks of 0x12 regs) and L2 (0x83 regs)
	 * registers so iommu_apply_resume_quirks() can restore them */
	for (i = 0; i < 6; i++)
		for (j = 0; j < 0x12; j++)
			iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);

	for (i = 0; i < 0x83; i++)
		iommu->stored_l2[i] = iommu_read_l2(iommu, i);
}
724
725/*
726 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
727 * initializes the hardware and our data structures with it.
728 */
/*
 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
 * initializes the hardware and our data structures with it.
 *
 * The parser is stateful: the *_RANGE_START and *_ALIAS_RANGE entry
 * types only record devid_start/flags/ext_flags/alias; the device table
 * entries for the whole range are written when the matching
 * IVHD_DEV_RANGE_END entry is seen.
 */
static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
					struct ivhd_header *h)
{
	u8 *p = (u8 *)h;
	u8 *end = p, flags = 0;
	u16 devid = 0, devid_start = 0, devid_to = 0;
	u32 dev_i, ext_flags = 0;
	bool alias = false;
	struct ivhd_entry *e;

	/*
	 * First save the recommended feature enable bits from ACPI
	 */
	iommu->acpi_flags = h->flags;

	/*
	 * Done. Now parse the device entries
	 */
	p += sizeof(struct ivhd_header);
	end += h->length;


	while (p < end) {
		e = (struct ivhd_entry *)p;
		switch (e->type) {
		case IVHD_DEV_ALL:
			/* Entry applies to every device behind this IOMMU */

			DUMP_printk("  DEV_ALL\t\t\t first devid: %02x:%02x.%x"
				    " last device %02x:%02x.%x flags: %02x\n",
				    PCI_BUS(iommu->first_device),
				    PCI_SLOT(iommu->first_device),
				    PCI_FUNC(iommu->first_device),
				    PCI_BUS(iommu->last_device),
				    PCI_SLOT(iommu->last_device),
				    PCI_FUNC(iommu->last_device),
				    e->flags);

			for (dev_i = iommu->first_device;
					dev_i <= iommu->last_device; ++dev_i)
				set_dev_entry_from_acpi(iommu, dev_i,
							e->flags, 0);
			break;
		case IVHD_DEV_SELECT:
			/* Entry for a single device */

			DUMP_printk("  DEV_SELECT\t\t\t devid: %02x:%02x.%x "
				    "flags: %02x\n",
				    PCI_BUS(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags);

			devid = e->devid;
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
			break;
		case IVHD_DEV_SELECT_RANGE_START:
			/* Start of a device range; applied at RANGE_END */

			DUMP_printk("  DEV_SELECT_RANGE_START\t "
				    "devid: %02x:%02x.%x flags: %02x\n",
				    PCI_BUS(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags);

			devid_start = e->devid;
			flags = e->flags;
			ext_flags = 0;
			alias = false;
			break;
		case IVHD_DEV_ALIAS:
			/* One device whose requests appear as devid_to */

			DUMP_printk("  DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
				    "flags: %02x devid_to: %02x:%02x.%x\n",
				    PCI_BUS(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags,
				    PCI_BUS(e->ext >> 8),
				    PCI_SLOT(e->ext >> 8),
				    PCI_FUNC(e->ext >> 8));

			devid = e->devid;
			devid_to = e->ext >> 8;
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
			set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
			amd_iommu_alias_table[devid] = devid_to;
			break;
		case IVHD_DEV_ALIAS_RANGE:
			/* Start of an aliased range; applied at RANGE_END */

			DUMP_printk("  DEV_ALIAS_RANGE\t\t "
				    "devid: %02x:%02x.%x flags: %02x "
				    "devid_to: %02x:%02x.%x\n",
				    PCI_BUS(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags,
				    PCI_BUS(e->ext >> 8),
				    PCI_SLOT(e->ext >> 8),
				    PCI_FUNC(e->ext >> 8));

			devid_start = e->devid;
			flags = e->flags;
			devid_to = e->ext >> 8;
			ext_flags = 0;
			alias = true;
			break;
		case IVHD_DEV_EXT_SELECT:
			/* Like DEV_SELECT but with extended flags */

			DUMP_printk("  DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
				    "flags: %02x ext: %08x\n",
				    PCI_BUS(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags, e->ext);

			devid = e->devid;
			set_dev_entry_from_acpi(iommu, devid, e->flags,
						e->ext);
			break;
		case IVHD_DEV_EXT_SELECT_RANGE:
			/* Like DEV_SELECT_RANGE_START but with ext flags */

			DUMP_printk("  DEV_EXT_SELECT_RANGE\t devid: "
				    "%02x:%02x.%x flags: %02x ext: %08x\n",
				    PCI_BUS(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags, e->ext);

			devid_start = e->devid;
			flags = e->flags;
			ext_flags = e->ext;
			alias = false;
			break;
		case IVHD_DEV_RANGE_END:
			/* Apply the saved state to the whole range */

			DUMP_printk("  DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
				    PCI_BUS(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid));

			devid = e->devid;
			for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
				if (alias) {
					amd_iommu_alias_table[dev_i] = devid_to;
					set_dev_entry_from_acpi(iommu,
						devid_to, flags, ext_flags);
				}
				set_dev_entry_from_acpi(iommu, dev_i,
							flags, ext_flags);
			}
			break;
		default:
			break;
		}

		p += ivhd_entry_length(p);
	}
}
886
887/* Initializes the device->iommu mapping for the driver */
888static int __init init_iommu_devices(struct amd_iommu *iommu)
889{
890 u32 i;
891
892 for (i = iommu->first_device; i <= iommu->last_device; ++i)
893 set_iommu_for_device(iommu, i);
894
895 return 0;
896}
897
/* Release all resources held by a single IOMMU (buffers and MMIO mapping) */
static void __init free_iommu_one(struct amd_iommu *iommu)
{
	free_command_buffer(iommu);
	free_event_buffer(iommu);
	iommu_unmap_mmio_space(iommu);
}
904
/* Tear down and free every IOMMU on the global list (error/cleanup path) */
static void __init free_iommu_all(void)
{
	struct amd_iommu *iommu, *next;

	/* _safe variant: each iommu is unlinked and freed while iterating */
	for_each_iommu_safe(iommu, next) {
		list_del(&iommu->list);
		free_iommu_one(iommu);
		kfree(iommu);
	}
}
915
916/*
917 * This function clues the initialization function for one IOMMU
918 * together and also allocates the command buffer and programs the
919 * hardware. It does NOT enable the IOMMU. This is done afterwards.
920 */
/*
 * This function clues the initialization function for one IOMMU
 * together and also allocates the command buffer and programs the
 * hardware. It does NOT enable the IOMMU. This is done afterwards.
 *
 * Returns 0 on success, a positive value if the PCI device could not
 * be found, or a negative errno on allocation/mapping failure.  On
 * failure the iommu stays on amd_iommu_list; free_iommu_all() on the
 * caller's error path reclaims the partially initialized state.
 */
static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
{
	spin_lock_init(&iommu->lock);

	/* Add IOMMU to internal data structures */
	list_add_tail(&iommu->list, &amd_iommu_list);
	iommu->index = amd_iommus_present++;

	if (unlikely(iommu->index >= MAX_IOMMUS)) {
		WARN(1, "AMD-Vi: System has more IOMMUs than supported by this driver\n");
		return -ENOSYS;
	}

	/* Index is fine - add IOMMU to the array */
	amd_iommus[iommu->index] = iommu;

	/*
	 * Copy data from ACPI table entry to the iommu struct
	 */
	iommu->dev = pci_get_bus_and_slot(PCI_BUS(h->devid), h->devid & 0xff);
	if (!iommu->dev)
		return 1;

	iommu->cap_ptr = h->cap_ptr;
	iommu->pci_seg = h->pci_seg;
	iommu->mmio_phys = h->mmio_phys;
	iommu->mmio_base = iommu_map_mmio_space(h->mmio_phys);
	if (!iommu->mmio_base)
		return -ENOMEM;

	iommu->cmd_buf = alloc_command_buffer(iommu);
	if (!iommu->cmd_buf)
		return -ENOMEM;

	iommu->evt_buf = alloc_event_buffer(iommu);
	if (!iommu->evt_buf)
		return -ENOMEM;

	/* MSI setup happens later in iommu_init_msi() */
	iommu->int_enabled = false;

	/* PCI config space first, then the per-device ACPI flags */
	init_iommu_from_pci(iommu);
	init_iommu_from_acpi(iommu, h);
	init_iommu_devices(iommu);

	if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
		amd_iommu_np_cache = true;

	return pci_enable_device(iommu->dev);
}
970
971/*
972 * Iterates over all IOMMU entries in the ACPI table, allocates the
973 * IOMMU structure and initializes it with init_iommu_one()
974 */
/*
 * Iterates over all IOMMU entries in the ACPI table, allocates the
 * IOMMU structure and initializes it with init_iommu_one()
 *
 * Always returns 0; real failures are reported through the global
 * amd_iommu_init_err, which the caller (amd_iommu_init) checks after
 * acpi_table_parse() returns.
 */
static int __init init_iommu_all(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;
	struct amd_iommu *iommu;
	int ret;

	end += table->length;
	p += IVRS_HEADER_LENGTH;

	while (p < end) {
		h = (struct ivhd_header *)p;
		switch (*p) {	/* first byte of each entry is its type */
		case ACPI_IVHD_TYPE:

			DUMP_printk("device: %02x:%02x.%01x cap: %04x "
				    "seg: %d flags: %01x info %04x\n",
				    PCI_BUS(h->devid), PCI_SLOT(h->devid),
				    PCI_FUNC(h->devid), h->cap_ptr,
				    h->pci_seg, h->flags, h->info);
			DUMP_printk("       mmio-addr: %016llx\n",
				    h->mmio_phys);

			iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
			if (iommu == NULL) {
				amd_iommu_init_err = -ENOMEM;
				return 0;
			}

			ret = init_iommu_one(iommu, h);
			if (ret) {
				amd_iommu_init_err = ret;
				return 0;
			}
			break;
		default:
			break;
		}
		p += h->length;

	}
	WARN_ON(p != end);

	return 0;
}
1020
1021/****************************************************************************
1022 *
1023 * The following functions initialize the MSI interrupts for all IOMMUs
1024 * in the system. Its a bit challenging because there could be multiple
1025 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
1026 * pci_dev.
1027 *
1028 ****************************************************************************/
1029
/*
 * Enable MSI on one IOMMU and request its (threaded) interrupt.
 * Returns 0 on success, 1 on failure (MSI enable or IRQ request failed).
 */
static int iommu_setup_msi(struct amd_iommu *iommu)
{
	int r;

	if (pci_enable_msi(iommu->dev))
		return 1;

	/* hard handler plus a thread for the expensive event-log work */
	r = request_threaded_irq(iommu->dev->irq,
				 amd_iommu_int_handler,
				 amd_iommu_int_thread,
				 0, "AMD-Vi",
				 iommu->dev);

	if (r) {
		/* undo pci_enable_msi() on failure */
		pci_disable_msi(iommu->dev);
		return 1;
	}

	iommu->int_enabled = true;
	/* let the hardware raise event-log interrupts */
	iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);

	return 0;
}
1053
1054static int iommu_init_msi(struct amd_iommu *iommu)
1055{
1056 if (iommu->int_enabled)
1057 return 0;
1058
1059 if (pci_find_capability(iommu->dev, PCI_CAP_ID_MSI))
1060 return iommu_setup_msi(iommu);
1061
1062 return 1;
1063}
1064
1065/****************************************************************************
1066 *
1067 * The next functions belong to the third pass of parsing the ACPI
1068 * table. In this last pass the memory mapping requirements are
1069 * gathered (like exclusion and unity mapping reanges).
1070 *
1071 ****************************************************************************/
1072
/* Free all unity map entries collected by init_unity_map_range() */
static void __init free_unity_maps(void)
{
	struct unity_map_entry *entry, *next;

	list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
		list_del(&entry->list);
		kfree(entry);
	}
}
1082
1083/* called when we find an exclusion range definition in ACPI */
1084static int __init init_exclusion_range(struct ivmd_header *m)
1085{
1086 int i;
1087
1088 switch (m->type) {
1089 case ACPI_IVMD_TYPE:
1090 set_device_exclusion_range(m->devid, m);
1091 break;
1092 case ACPI_IVMD_TYPE_ALL:
1093 for (i = 0; i <= amd_iommu_last_bdf; ++i)
1094 set_device_exclusion_range(i, m);
1095 break;
1096 case ACPI_IVMD_TYPE_RANGE:
1097 for (i = m->devid; i <= m->aux; ++i)
1098 set_device_exclusion_range(i, m);
1099 break;
1100 default:
1101 break;
1102 }
1103
1104 return 0;
1105}
1106
1107/* called for unity map ACPI definition */
1108static int __init init_unity_map_range(struct ivmd_header *m)
1109{
1110 struct unity_map_entry *e = 0;
1111 char *s;
1112
1113 e = kzalloc(sizeof(*e), GFP_KERNEL);
1114 if (e == NULL)
1115 return -ENOMEM;
1116
1117 switch (m->type) {
1118 default:
1119 kfree(e);
1120 return 0;
1121 case ACPI_IVMD_TYPE:
1122 s = "IVMD_TYPEi\t\t\t";
1123 e->devid_start = e->devid_end = m->devid;
1124 break;
1125 case ACPI_IVMD_TYPE_ALL:
1126 s = "IVMD_TYPE_ALL\t\t";
1127 e->devid_start = 0;
1128 e->devid_end = amd_iommu_last_bdf;
1129 break;
1130 case ACPI_IVMD_TYPE_RANGE:
1131 s = "IVMD_TYPE_RANGE\t\t";
1132 e->devid_start = m->devid;
1133 e->devid_end = m->aux;
1134 break;
1135 }
1136 e->address_start = PAGE_ALIGN(m->range_start);
1137 e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
1138 e->prot = m->flags >> 1;
1139
1140 DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
1141 " range_start: %016llx range_end: %016llx flags: %x\n", s,
1142 PCI_BUS(e->devid_start), PCI_SLOT(e->devid_start),
1143 PCI_FUNC(e->devid_start), PCI_BUS(e->devid_end),
1144 PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
1145 e->address_start, e->address_end, m->flags);
1146
1147 list_add_tail(&e->list, &amd_iommu_unity_map);
1148
1149 return 0;
1150}
1151
1152/* iterates over all memory definitions we find in the ACPI table */
1153static int __init init_memory_definitions(struct acpi_table_header *table)
1154{
1155 u8 *p = (u8 *)table, *end = (u8 *)table;
1156 struct ivmd_header *m;
1157
1158 end += table->length;
1159 p += IVRS_HEADER_LENGTH;
1160
1161 while (p < end) {
1162 m = (struct ivmd_header *)p;
1163 if (m->flags & IVMD_FLAG_EXCL_RANGE)
1164 init_exclusion_range(m);
1165 else if (m->flags & IVMD_FLAG_UNITY_MAP)
1166 init_unity_map_range(m);
1167
1168 p += m->length;
1169 }
1170
1171 return 0;
1172}
1173
1174/*
1175 * Init the device table to not allow DMA access for devices and
1176 * suppress all page faults
1177 */
1178static void init_device_table(void)
1179{
1180 u32 devid;
1181
1182 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
1183 set_dev_entry_bit(devid, DEV_ENTRY_VALID);
1184 set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
1185 }
1186}
1187
1188static void iommu_init_flags(struct amd_iommu *iommu)
1189{
1190 iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
1191 iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
1192 iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
1193
1194 iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
1195 iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
1196 iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
1197
1198 iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
1199 iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
1200 iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
1201
1202 iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
1203 iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
1204 iommu_feature_disable(iommu, CONTROL_ISOC_EN);
1205
1206 /*
1207 * make IOMMU memory accesses cache coherent
1208 */
1209 iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
1210}
1211
/*
 * Re-program an RD890 IOMMU after resume. RD890 BIOSes may not fully
 * reconfigure the IOMMU, so restore the state saved by
 * init_iommu_from_pci(): the BAR, the L1/L2 indirect registers, and
 * finally re-lock the PCI setup registers.
 */
static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
{
	int i, j;
	u32 ioc_feature_control;
	struct pci_dev *pdev = NULL;

	/* RD890 BIOSes may not have completely reconfigured the iommu */
	if (!is_rd890_iommu(iommu->dev))
		return;

	/*
	 * First, we need to ensure that the iommu is enabled. This is
	 * controlled by a register in the northbridge
	 */
	pdev = pci_get_bus_and_slot(iommu->dev->bus->number, PCI_DEVFN(0, 0));

	if (!pdev)
		return;

	/* Select Northbridge indirect register 0x75 and enable writing */
	pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
	pci_read_config_dword(pdev, 0x64, &ioc_feature_control);

	/* Enable the iommu */
	if (!(ioc_feature_control & 0x1))
		pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);

	/* drop the reference taken by pci_get_bus_and_slot() */
	pci_dev_put(pdev);

	/* Restore the iommu BAR */
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
			       iommu->stored_addr_lo);
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
			       iommu->stored_addr_hi);

	/* Restore the l1 indirect regs for each of the 6 l1s */
	for (i = 0; i < 6; i++)
		for (j = 0; j < 0x12; j++)
			iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);

	/* Restore the l2 indirect regs */
	for (i = 0; i < 0x83; i++)
		iommu_write_l2(iommu, i, iommu->stored_l2[i]);

	/* Lock PCI setup registers (low bit of the saved address locks them) */
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
			       iommu->stored_addr_lo | 1);
}
1260
1261/*
1262 * This function finally enables all IOMMUs found in the system after
1263 * they have been initialized
1264 */
/*
 * This function finally enables all IOMMUs found in the system after
 * they have been initialized.
 *
 * Each IOMMU is disabled first so it can be reprogrammed safely; the
 * buffers and exclusion range must be set before iommu_enable(), and
 * caches are flushed only after the IOMMU is able to execute commands.
 */
static void enable_iommus(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		iommu_disable(iommu);
		iommu_init_flags(iommu);
		iommu_set_device_table(iommu);
		iommu_enable_command_buffer(iommu);
		iommu_enable_event_buffer(iommu);
		iommu_set_exclusion_range(iommu);
		iommu_init_msi(iommu);
		iommu_enable(iommu);
		iommu_flush_all_caches(iommu);
	}
}
1281
/* Disable every IOMMU in the system (shutdown and suspend path) */
static void disable_iommus(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		iommu_disable(iommu);
}
1289
1290/*
1291 * Suspend/Resume support
1292 * disable suspend until real resume implemented
1293 */
1294
/* syscore resume callback: bring the IOMMUs back after suspend */
static void amd_iommu_resume(void)
{
	struct amd_iommu *iommu;

	/* fix up anything the BIOS left misconfigured (RD890 quirk) */
	for_each_iommu(iommu)
		iommu_apply_resume_quirks(iommu);

	/* re-load the hardware */
	enable_iommus();

	/*
	 * we have to flush after the IOMMUs are enabled because a
	 * disabled IOMMU will never execute the commands we send
	 */
	/* NOTE(review): enable_iommus() already flushes each IOMMU's
	 * caches; this second pass looks redundant — confirm intent. */
	for_each_iommu(iommu)
		iommu_flush_all_caches(iommu);
}
1312
/* syscore suspend callback: always succeeds */
static int amd_iommu_suspend(void)
{
	/* disable IOMMUs to go out of the way for BIOS */
	disable_iommus();

	return 0;
}
1320
/* suspend/resume hooks registered via register_syscore_ops() */
static struct syscore_ops amd_iommu_syscore_ops = {
	.suspend = amd_iommu_suspend,
	.resume = amd_iommu_resume,
};
1325
1326/*
1327 * This is the core init function for AMD IOMMU hardware in the system.
1328 * This function is called from the generic x86 DMA layer initialization
1329 * code.
1330 *
1331 * This function basically parses the ACPI table for AMD IOMMU (IVRS)
1332 * three times:
1333 *
1334 * 1 pass) Find the highest PCI device id the driver has to handle.
1335 * Upon this information the size of the data structures is
1336 * determined that needs to be allocated.
1337 *
1338 * 2 pass) Initialize the data structures just allocated with the
1339 * information in the ACPI table about available AMD IOMMUs
1340 * in the system. It also maps the PCI devices in the
1341 * system to specific IOMMUs
1342 *
1343 * 3 pass) After the basic data structures are allocated and
1344 * initialized we update them with information about memory
1345 * remapping requirements parsed out of the ACPI table in
1346 * this last pass.
1347 *
1348 * After that the hardware is initialized and ready to go. In the last
1349 * step we do some Linux specific things like registering the driver in
1350 * the dma_ops interface and initializing the suspend/resume support
1351 * functions. Finally it prints some information about AMD IOMMUs and
1352 * the driver state and enables the hardware.
1353 */
/*
 * This is the core init function for AMD IOMMU hardware in the system.
 * This function is called from the generic x86 DMA layer initialization
 * code.
 *
 * This function basically parses the ACPI table for AMD IOMMU (IVRS)
 * three times:
 *
 *	1 pass) Find the highest PCI device id the driver has to handle.
 *		Upon this information the size of the data structures is
 *		determined that needs to be allocated.
 *
 *	2 pass) Initialize the data structures just allocated with the
 *		information in the ACPI table about available AMD IOMMUs
 *		in the system. It also maps the PCI devices in the
 *		system to specific IOMMUs
 *
 *	3 pass) After the basic data structures are allocated and
 *		initialized we update them with information about memory
 *		remapping requirements parsed out of the ACPI table in
 *		this last pass.
 *
 * After that the hardware is initialized and ready to go. In the last
 * step we do some Linux specific things like registering the driver in
 * the dma_ops interface and initializing the suspend/resume support
 * functions. Finally it prints some information about AMD IOMMUs and
 * the driver state and enables the hardware.
 */
static int __init amd_iommu_init(void)
{
	int i, ret = 0;

	/*
	 * First parse ACPI tables to find the largest Bus/Dev/Func
	 * we need to handle. Upon this information the shared data
	 * structures for the IOMMUs in the system will be allocated
	 */
	if (acpi_table_parse("IVRS", find_last_devid_acpi) != 0)
		return -ENODEV;

	/* acpi_table_parse() cannot return our errors; check the global */
	ret = amd_iommu_init_err;
	if (ret)
		goto out;

	dev_table_size     = tbl_size(DEV_TABLE_ENTRY_SIZE);
	alias_table_size   = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
	rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);

	ret = -ENOMEM;

	/* Device table - directly used by all IOMMUs */
	amd_iommu_dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
				      get_order(dev_table_size));
	if (amd_iommu_dev_table == NULL)
		goto out;

	/*
	 * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the
	 * IOMMU see for that device
	 */
	amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
			get_order(alias_table_size));
	if (amd_iommu_alias_table == NULL)
		goto free;

	/* IOMMU rlookup table - find the IOMMU for a specific device */
	amd_iommu_rlookup_table = (void *)__get_free_pages(
			GFP_KERNEL | __GFP_ZERO,
			get_order(rlookup_table_size));
	if (amd_iommu_rlookup_table == NULL)
		goto free;

	amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
					    GFP_KERNEL | __GFP_ZERO,
					    get_order(MAX_DOMAIN_ID/8));
	if (amd_iommu_pd_alloc_bitmap == NULL)
		goto free;

	/* init the device table */
	init_device_table();

	/*
	 * let all alias entries point to itself
	 */
	for (i = 0; i <= amd_iommu_last_bdf; ++i)
		amd_iommu_alias_table[i] = i;

	/*
	 * never allocate domain 0 because its used as the non-allocated and
	 * error value placeholder
	 */
	amd_iommu_pd_alloc_bitmap[0] = 1;

	spin_lock_init(&amd_iommu_pd_lock);

	/*
	 * now the data structures are allocated and basically initialized
	 * start the real acpi table scan
	 */
	ret = -ENODEV;
	if (acpi_table_parse("IVRS", init_iommu_all) != 0)
		goto free;

	if (amd_iommu_init_err) {
		ret = amd_iommu_init_err;
		goto free;
	}

	if (acpi_table_parse("IVRS", init_memory_definitions) != 0)
		goto free;

	if (amd_iommu_init_err) {
		ret = amd_iommu_init_err;
		goto free;
	}

	ret = amd_iommu_init_devices();
	if (ret)
		goto free;

	enable_iommus();

	if (iommu_pass_through)
		ret = amd_iommu_init_passthrough();
	else
		ret = amd_iommu_init_dma_ops();

	if (ret)
		goto free_disable;

	amd_iommu_init_api();

	amd_iommu_init_notifier();

	register_syscore_ops(&amd_iommu_syscore_ops);

	if (iommu_pass_through)
		goto out;

	if (amd_iommu_unmap_flush)
		printk(KERN_INFO "AMD-Vi: IO/TLB flush on unmap enabled\n");
	else
		printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n");

	x86_platform.iommu_shutdown = disable_iommus;
out:
	return ret;

	/* goto-based cleanup: later labels free progressively less state */
free_disable:
	disable_iommus();

free:
	amd_iommu_uninit_devices();

	free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
		   get_order(MAX_DOMAIN_ID/8));

	free_pages((unsigned long)amd_iommu_rlookup_table,
		   get_order(rlookup_table_size));

	free_pages((unsigned long)amd_iommu_alias_table,
		   get_order(alias_table_size));

	free_pages((unsigned long)amd_iommu_dev_table,
		   get_order(dev_table_size));

	free_iommu_all();

	free_unity_maps();

#ifdef CONFIG_GART_IOMMU
	/*
	 * We failed to initialize the AMD IOMMU - try fallback to GART
	 * if possible.
	 */
	gart_iommu_init();

#endif

	goto out;
}
1507
1508/****************************************************************************
1509 *
1510 * Early detect code. This code runs at IOMMU detection time in the DMA
1511 * layer. It just looks if there is an IVRS ACPI table to detect AMD
1512 * IOMMUs
1513 *
1514 ****************************************************************************/
/*
 * Intentionally empty: amd_iommu_detect() only uses the return value of
 * acpi_table_parse() to learn whether an IVRS table exists at all.
 */
static int __init early_amd_iommu_detect(struct acpi_table_header *table)
{
	return 0;
}
1519
/*
 * Early detection entry point: returns 1 and registers amd_iommu_init
 * as the x86 iommu_init hook when an IVRS ACPI table is present,
 * -ENODEV otherwise (or when the driver was disabled on the command line).
 */
int __init amd_iommu_detect(void)
{
	if (no_iommu || (iommu_detected && !gart_iommu_aperture))
		return -ENODEV;

	/* "amd_iommu=off" on the kernel command line */
	if (amd_iommu_disabled)
		return -ENODEV;

	if (acpi_table_parse("IVRS", early_amd_iommu_detect) == 0) {
		iommu_detected = 1;
		amd_iommu_detected = 1;
		x86_init.iommu.iommu_init = amd_iommu_init;

		/* Make sure ACS will be enabled */
		pci_request_acs();
		return 1;
	}
	return -ENODEV;
}
1539
1540/****************************************************************************
1541 *
1542 * Parsing functions for the AMD IOMMU specific kernel command line
1543 * options.
1544 *
1545 ****************************************************************************/
1546
/* "amd_iommu_dump" boot parameter: enable verbose ACPI table dumping */
static int __init parse_amd_iommu_dump(char *str)
{
	amd_iommu_dump = true;

	/* returning 1 tells the __setup machinery the option was consumed */
	return 1;
}
1553
1554static int __init parse_amd_iommu_options(char *str)
1555{
1556 for (; *str; ++str) {
1557 if (strncmp(str, "fullflush", 9) == 0)
1558 amd_iommu_unmap_flush = true;
1559 if (strncmp(str, "off", 3) == 0)
1560 amd_iommu_disabled = true;
1561 }
1562
1563 return 1;
1564}
1565
/* register the kernel command line parsers defined above */
__setup("amd_iommu_dump", parse_amd_iommu_dump);
__setup("amd_iommu=", parse_amd_iommu_options);

/* hook detection into the generic x86 IOMMU init table */
IOMMU_INIT_FINISH(amd_iommu_detect,
		  gart_iommu_hole_init,
		  0,
		  0);
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index 289e92862fd9..afdc3f756dea 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -27,15 +27,12 @@
27 * timer, but by default APB timer has higher rating than local APIC timers. 27 * timer, but by default APB timer has higher rating than local APIC timers.
28 */ 28 */
29 29
30#include <linux/clocksource.h>
31#include <linux/clockchips.h>
32#include <linux/delay.h> 30#include <linux/delay.h>
31#include <linux/dw_apb_timer.h>
33#include <linux/errno.h> 32#include <linux/errno.h>
34#include <linux/init.h> 33#include <linux/init.h>
35#include <linux/sysdev.h>
36#include <linux/slab.h> 34#include <linux/slab.h>
37#include <linux/pm.h> 35#include <linux/pm.h>
38#include <linux/pci.h>
39#include <linux/sfi.h> 36#include <linux/sfi.h>
40#include <linux/interrupt.h> 37#include <linux/interrupt.h>
41#include <linux/cpu.h> 38#include <linux/cpu.h>
@@ -44,76 +41,48 @@
44#include <asm/fixmap.h> 41#include <asm/fixmap.h>
45#include <asm/apb_timer.h> 42#include <asm/apb_timer.h>
46#include <asm/mrst.h> 43#include <asm/mrst.h>
44#include <asm/time.h>
47 45
48#define APBT_MASK CLOCKSOURCE_MASK(32)
49#define APBT_SHIFT 22
50#define APBT_CLOCKEVENT_RATING 110 46#define APBT_CLOCKEVENT_RATING 110
51#define APBT_CLOCKSOURCE_RATING 250 47#define APBT_CLOCKSOURCE_RATING 250
52#define APBT_MIN_DELTA_USEC 200
53 48
54#define EVT_TO_APBT_DEV(evt) container_of(evt, struct apbt_dev, evt)
55#define APBT_CLOCKEVENT0_NUM (0) 49#define APBT_CLOCKEVENT0_NUM (0)
56#define APBT_CLOCKEVENT1_NUM (1)
57#define APBT_CLOCKSOURCE_NUM (2) 50#define APBT_CLOCKSOURCE_NUM (2)
58 51
59static unsigned long apbt_address; 52static phys_addr_t apbt_address;
60static int apb_timer_block_enabled; 53static int apb_timer_block_enabled;
61static void __iomem *apbt_virt_address; 54static void __iomem *apbt_virt_address;
62static int phy_cs_timer_id;
63 55
64/* 56/*
65 * Common DW APB timer info 57 * Common DW APB timer info
66 */ 58 */
67static uint64_t apbt_freq; 59static unsigned long apbt_freq;
68
69static void apbt_set_mode(enum clock_event_mode mode,
70 struct clock_event_device *evt);
71static int apbt_next_event(unsigned long delta,
72 struct clock_event_device *evt);
73static cycle_t apbt_read_clocksource(struct clocksource *cs);
74static void apbt_restart_clocksource(struct clocksource *cs);
75 60
76struct apbt_dev { 61struct apbt_dev {
77 struct clock_event_device evt; 62 struct dw_apb_clock_event_device *timer;
78 unsigned int num; 63 unsigned int num;
79 int cpu; 64 int cpu;
80 unsigned int irq; 65 unsigned int irq;
81 unsigned int tick; 66 char name[10];
82 unsigned int count;
83 unsigned int flags;
84 char name[10];
85}; 67};
86 68
87static DEFINE_PER_CPU(struct apbt_dev, cpu_apbt_dev); 69static struct dw_apb_clocksource *clocksource_apbt;
88 70
89#ifdef CONFIG_SMP 71static inline void __iomem *adev_virt_addr(struct apbt_dev *adev)
90static unsigned int apbt_num_timers_used;
91static struct apbt_dev *apbt_devs;
92#endif
93
94static inline unsigned long apbt_readl_reg(unsigned long a)
95{ 72{
96 return readl(apbt_virt_address + a); 73 return apbt_virt_address + adev->num * APBTMRS_REG_SIZE;
97} 74}
98 75
99static inline void apbt_writel_reg(unsigned long d, unsigned long a) 76static DEFINE_PER_CPU(struct apbt_dev, cpu_apbt_dev);
100{
101 writel(d, apbt_virt_address + a);
102}
103
104static inline unsigned long apbt_readl(int n, unsigned long a)
105{
106 return readl(apbt_virt_address + a + n * APBTMRS_REG_SIZE);
107}
108 77
109static inline void apbt_writel(int n, unsigned long d, unsigned long a) 78#ifdef CONFIG_SMP
110{ 79static unsigned int apbt_num_timers_used;
111 writel(d, apbt_virt_address + a + n * APBTMRS_REG_SIZE); 80#endif
112}
113 81
114static inline void apbt_set_mapping(void) 82static inline void apbt_set_mapping(void)
115{ 83{
116 struct sfi_timer_table_entry *mtmr; 84 struct sfi_timer_table_entry *mtmr;
85 int phy_cs_timer_id = 0;
117 86
118 if (apbt_virt_address) { 87 if (apbt_virt_address) {
119 pr_debug("APBT base already mapped\n"); 88 pr_debug("APBT base already mapped\n");
@@ -125,21 +94,18 @@ static inline void apbt_set_mapping(void)
125 APBT_CLOCKEVENT0_NUM); 94 APBT_CLOCKEVENT0_NUM);
126 return; 95 return;
127 } 96 }
128 apbt_address = (unsigned long)mtmr->phys_addr; 97 apbt_address = (phys_addr_t)mtmr->phys_addr;
129 if (!apbt_address) { 98 if (!apbt_address) {
130 printk(KERN_WARNING "No timer base from SFI, use default\n"); 99 printk(KERN_WARNING "No timer base from SFI, use default\n");
131 apbt_address = APBT_DEFAULT_BASE; 100 apbt_address = APBT_DEFAULT_BASE;
132 } 101 }
133 apbt_virt_address = ioremap_nocache(apbt_address, APBT_MMAP_SIZE); 102 apbt_virt_address = ioremap_nocache(apbt_address, APBT_MMAP_SIZE);
134 if (apbt_virt_address) { 103 if (!apbt_virt_address) {
135 pr_debug("Mapped APBT physical addr %p at virtual addr %p\n",\ 104 pr_debug("Failed mapping APBT phy address at %lu\n",\
136 (void *)apbt_address, (void *)apbt_virt_address); 105 (unsigned long)apbt_address);
137 } else {
138 pr_debug("Failed mapping APBT phy address at %p\n",\
139 (void *)apbt_address);
140 goto panic_noapbt; 106 goto panic_noapbt;
141 } 107 }
142 apbt_freq = mtmr->freq_hz / USEC_PER_SEC; 108 apbt_freq = mtmr->freq_hz;
143 sfi_free_mtmr(mtmr); 109 sfi_free_mtmr(mtmr);
144 110
145 /* Now figure out the physical timer id for clocksource device */ 111 /* Now figure out the physical timer id for clocksource device */
@@ -148,9 +114,14 @@ static inline void apbt_set_mapping(void)
148 goto panic_noapbt; 114 goto panic_noapbt;
149 115
150 /* Now figure out the physical timer id */ 116 /* Now figure out the physical timer id */
151 phy_cs_timer_id = (unsigned int)(mtmr->phys_addr & 0xff) 117 pr_debug("Use timer %d for clocksource\n",
152 / APBTMRS_REG_SIZE; 118 (int)(mtmr->phys_addr & 0xff) / APBTMRS_REG_SIZE);
153 pr_debug("Use timer %d for clocksource\n", phy_cs_timer_id); 119 phy_cs_timer_id = (unsigned int)(mtmr->phys_addr & 0xff) /
120 APBTMRS_REG_SIZE;
121
122 clocksource_apbt = dw_apb_clocksource_init(APBT_CLOCKSOURCE_RATING,
123 "apbt0", apbt_virt_address + phy_cs_timer_id *
124 APBTMRS_REG_SIZE, apbt_freq);
154 return; 125 return;
155 126
156panic_noapbt: 127panic_noapbt:
@@ -172,82 +143,6 @@ static inline int is_apbt_capable(void)
172 return apbt_virt_address ? 1 : 0; 143 return apbt_virt_address ? 1 : 0;
173} 144}
174 145
175static struct clocksource clocksource_apbt = {
176 .name = "apbt",
177 .rating = APBT_CLOCKSOURCE_RATING,
178 .read = apbt_read_clocksource,
179 .mask = APBT_MASK,
180 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
181 .resume = apbt_restart_clocksource,
182};
183
184/* boot APB clock event device */
185static struct clock_event_device apbt_clockevent = {
186 .name = "apbt0",
187 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
188 .set_mode = apbt_set_mode,
189 .set_next_event = apbt_next_event,
190 .shift = APBT_SHIFT,
191 .irq = 0,
192 .rating = APBT_CLOCKEVENT_RATING,
193};
194
195/*
196 * start count down from 0xffff_ffff. this is done by toggling the enable bit
197 * then load initial load count to ~0.
198 */
199static void apbt_start_counter(int n)
200{
201 unsigned long ctrl = apbt_readl(n, APBTMR_N_CONTROL);
202
203 ctrl &= ~APBTMR_CONTROL_ENABLE;
204 apbt_writel(n, ctrl, APBTMR_N_CONTROL);
205 apbt_writel(n, ~0, APBTMR_N_LOAD_COUNT);
206 /* enable, mask interrupt */
207 ctrl &= ~APBTMR_CONTROL_MODE_PERIODIC;
208 ctrl |= (APBTMR_CONTROL_ENABLE | APBTMR_CONTROL_INT);
209 apbt_writel(n, ctrl, APBTMR_N_CONTROL);
210 /* read it once to get cached counter value initialized */
211 apbt_read_clocksource(&clocksource_apbt);
212}
213
214static irqreturn_t apbt_interrupt_handler(int irq, void *data)
215{
216 struct apbt_dev *dev = (struct apbt_dev *)data;
217 struct clock_event_device *aevt = &dev->evt;
218
219 if (!aevt->event_handler) {
220 printk(KERN_INFO "Spurious APBT timer interrupt on %d\n",
221 dev->num);
222 return IRQ_NONE;
223 }
224 aevt->event_handler(aevt);
225 return IRQ_HANDLED;
226}
227
228static void apbt_restart_clocksource(struct clocksource *cs)
229{
230 apbt_start_counter(phy_cs_timer_id);
231}
232
233static void apbt_enable_int(int n)
234{
235 unsigned long ctrl = apbt_readl(n, APBTMR_N_CONTROL);
236 /* clear pending intr */
237 apbt_readl(n, APBTMR_N_EOI);
238 ctrl &= ~APBTMR_CONTROL_INT;
239 apbt_writel(n, ctrl, APBTMR_N_CONTROL);
240}
241
242static void apbt_disable_int(int n)
243{
244 unsigned long ctrl = apbt_readl(n, APBTMR_N_CONTROL);
245
246 ctrl |= APBTMR_CONTROL_INT;
247 apbt_writel(n, ctrl, APBTMR_N_CONTROL);
248}
249
250
251static int __init apbt_clockevent_register(void) 146static int __init apbt_clockevent_register(void)
252{ 147{
253 struct sfi_timer_table_entry *mtmr; 148 struct sfi_timer_table_entry *mtmr;
@@ -260,45 +155,21 @@ static int __init apbt_clockevent_register(void)
260 return -ENODEV; 155 return -ENODEV;
261 } 156 }
262 157
263 /*
264 * We need to calculate the scaled math multiplication factor for
265 * nanosecond to apbt tick conversion.
266 * mult = (nsec/cycle)*2^APBT_SHIFT
267 */
268 apbt_clockevent.mult = div_sc((unsigned long) mtmr->freq_hz
269 , NSEC_PER_SEC, APBT_SHIFT);
270
271 /* Calculate the min / max delta */
272 apbt_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF,
273 &apbt_clockevent);
274 apbt_clockevent.min_delta_ns = clockevent_delta2ns(
275 APBT_MIN_DELTA_USEC*apbt_freq,
276 &apbt_clockevent);
277 /*
278 * Start apbt with the boot cpu mask and make it
279 * global if not used for per cpu timer.
280 */
281 apbt_clockevent.cpumask = cpumask_of(smp_processor_id());
282 adev->num = smp_processor_id(); 158 adev->num = smp_processor_id();
283 memcpy(&adev->evt, &apbt_clockevent, sizeof(struct clock_event_device)); 159 adev->timer = dw_apb_clockevent_init(smp_processor_id(), "apbt0",
160 mrst_timer_options == MRST_TIMER_LAPIC_APBT ?
161 APBT_CLOCKEVENT_RATING - 100 : APBT_CLOCKEVENT_RATING,
162 adev_virt_addr(adev), 0, apbt_freq);
163 /* Firmware does EOI handling for us. */
164 adev->timer->eoi = NULL;
284 165
285 if (mrst_timer_options == MRST_TIMER_LAPIC_APBT) { 166 if (mrst_timer_options == MRST_TIMER_LAPIC_APBT) {
286 adev->evt.rating = APBT_CLOCKEVENT_RATING - 100; 167 global_clock_event = &adev->timer->ced;
287 global_clock_event = &adev->evt;
288 printk(KERN_DEBUG "%s clockevent registered as global\n", 168 printk(KERN_DEBUG "%s clockevent registered as global\n",
289 global_clock_event->name); 169 global_clock_event->name);
290 } 170 }
291 171
292 if (request_irq(apbt_clockevent.irq, apbt_interrupt_handler, 172 dw_apb_clockevent_register(adev->timer);
293 IRQF_TIMER | IRQF_DISABLED | IRQF_NOBALANCING,
294 apbt_clockevent.name, adev)) {
295 printk(KERN_ERR "Failed request IRQ for APBT%d\n",
296 apbt_clockevent.irq);
297 }
298
299 clockevents_register_device(&adev->evt);
300 /* Start APBT 0 interrupts */
301 apbt_enable_int(APBT_CLOCKEVENT0_NUM);
302 173
303 sfi_free_mtmr(mtmr); 174 sfi_free_mtmr(mtmr);
304 return 0; 175 return 0;
@@ -316,52 +187,34 @@ static void apbt_setup_irq(struct apbt_dev *adev)
316 irq_set_affinity(adev->irq, cpumask_of(adev->cpu)); 187 irq_set_affinity(adev->irq, cpumask_of(adev->cpu));
317 /* APB timer irqs are set up as mp_irqs, timer is edge type */ 188 /* APB timer irqs are set up as mp_irqs, timer is edge type */
318 __irq_set_handler(adev->irq, handle_edge_irq, 0, "edge"); 189 __irq_set_handler(adev->irq, handle_edge_irq, 0, "edge");
319
320 if (system_state == SYSTEM_BOOTING) {
321 if (request_irq(adev->irq, apbt_interrupt_handler,
322 IRQF_TIMER | IRQF_DISABLED |
323 IRQF_NOBALANCING,
324 adev->name, adev)) {
325 printk(KERN_ERR "Failed request IRQ for APBT%d\n",
326 adev->num);
327 }
328 } else
329 enable_irq(adev->irq);
330} 190}
331 191
332/* Should be called with per cpu */ 192/* Should be called with per cpu */
333void apbt_setup_secondary_clock(void) 193void apbt_setup_secondary_clock(void)
334{ 194{
335 struct apbt_dev *adev; 195 struct apbt_dev *adev;
336 struct clock_event_device *aevt;
337 int cpu; 196 int cpu;
338 197
339 /* Don't register boot CPU clockevent */ 198 /* Don't register boot CPU clockevent */
340 cpu = smp_processor_id(); 199 cpu = smp_processor_id();
341 if (!cpu) 200 if (!cpu)
342 return; 201 return;
343 /*
344 * We need to calculate the scaled math multiplication factor for
345 * nanosecond to apbt tick conversion.
346 * mult = (nsec/cycle)*2^APBT_SHIFT
347 */
348 printk(KERN_INFO "Init per CPU clockevent %d\n", cpu);
349 adev = &per_cpu(cpu_apbt_dev, cpu);
350 aevt = &adev->evt;
351 202
352 memcpy(aevt, &apbt_clockevent, sizeof(*aevt)); 203 adev = &__get_cpu_var(cpu_apbt_dev);
353 aevt->cpumask = cpumask_of(cpu); 204 if (!adev->timer) {
354 aevt->name = adev->name; 205 adev->timer = dw_apb_clockevent_init(cpu, adev->name,
355 aevt->mode = CLOCK_EVT_MODE_UNUSED; 206 APBT_CLOCKEVENT_RATING, adev_virt_addr(adev),
207 adev->irq, apbt_freq);
208 adev->timer->eoi = NULL;
209 } else {
210 dw_apb_clockevent_resume(adev->timer);
211 }
356 212
357 printk(KERN_INFO "Registering CPU %d clockevent device %s, mask %08x\n", 213 printk(KERN_INFO "Registering CPU %d clockevent device %s, cpu %08x\n",
358 cpu, aevt->name, *(u32 *)aevt->cpumask); 214 cpu, adev->name, adev->cpu);
359 215
360 apbt_setup_irq(adev); 216 apbt_setup_irq(adev);
361 217 dw_apb_clockevent_register(adev->timer);
362 clockevents_register_device(aevt);
363
364 apbt_enable_int(cpu);
365 218
366 return; 219 return;
367} 220}
@@ -384,13 +237,12 @@ static int apbt_cpuhp_notify(struct notifier_block *n,
384 237
385 switch (action & 0xf) { 238 switch (action & 0xf) {
386 case CPU_DEAD: 239 case CPU_DEAD:
387 disable_irq(adev->irq); 240 dw_apb_clockevent_pause(adev->timer);
388 apbt_disable_int(cpu);
389 if (system_state == SYSTEM_RUNNING) { 241 if (system_state == SYSTEM_RUNNING) {
390 pr_debug("skipping APBT CPU %lu offline\n", cpu); 242 pr_debug("skipping APBT CPU %lu offline\n", cpu);
391 } else if (adev) { 243 } else if (adev) {
392 pr_debug("APBT clockevent for cpu %lu offline\n", cpu); 244 pr_debug("APBT clockevent for cpu %lu offline\n", cpu);
393 free_irq(adev->irq, adev); 245 dw_apb_clockevent_stop(adev->timer);
394 } 246 }
395 break; 247 break;
396 default: 248 default:
@@ -415,116 +267,16 @@ void apbt_setup_secondary_clock(void) {}
415 267
416#endif /* CONFIG_SMP */ 268#endif /* CONFIG_SMP */
417 269
418static void apbt_set_mode(enum clock_event_mode mode,
419 struct clock_event_device *evt)
420{
421 unsigned long ctrl;
422 uint64_t delta;
423 int timer_num;
424 struct apbt_dev *adev = EVT_TO_APBT_DEV(evt);
425
426 BUG_ON(!apbt_virt_address);
427
428 timer_num = adev->num;
429 pr_debug("%s CPU %d timer %d mode=%d\n",
430 __func__, first_cpu(*evt->cpumask), timer_num, mode);
431
432 switch (mode) {
433 case CLOCK_EVT_MODE_PERIODIC:
434 delta = ((uint64_t)(NSEC_PER_SEC/HZ)) * apbt_clockevent.mult;
435 delta >>= apbt_clockevent.shift;
436 ctrl = apbt_readl(timer_num, APBTMR_N_CONTROL);
437 ctrl |= APBTMR_CONTROL_MODE_PERIODIC;
438 apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
439 /*
440 * DW APB p. 46, have to disable timer before load counter,
441 * may cause sync problem.
442 */
443 ctrl &= ~APBTMR_CONTROL_ENABLE;
444 apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
445 udelay(1);
446 pr_debug("Setting clock period %d for HZ %d\n", (int)delta, HZ);
447 apbt_writel(timer_num, delta, APBTMR_N_LOAD_COUNT);
448 ctrl |= APBTMR_CONTROL_ENABLE;
449 apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
450 break;
451 /* APB timer does not have one-shot mode, use free running mode */
452 case CLOCK_EVT_MODE_ONESHOT:
453 ctrl = apbt_readl(timer_num, APBTMR_N_CONTROL);
454 /*
455 * set free running mode, this mode will let timer reload max
456 * timeout which will give time (3min on 25MHz clock) to rearm
457 * the next event, therefore emulate the one-shot mode.
458 */
459 ctrl &= ~APBTMR_CONTROL_ENABLE;
460 ctrl &= ~APBTMR_CONTROL_MODE_PERIODIC;
461
462 apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
463 /* write again to set free running mode */
464 apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
465
466 /*
467 * DW APB p. 46, load counter with all 1s before starting free
468 * running mode.
469 */
470 apbt_writel(timer_num, ~0, APBTMR_N_LOAD_COUNT);
471 ctrl &= ~APBTMR_CONTROL_INT;
472 ctrl |= APBTMR_CONTROL_ENABLE;
473 apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
474 break;
475
476 case CLOCK_EVT_MODE_UNUSED:
477 case CLOCK_EVT_MODE_SHUTDOWN:
478 apbt_disable_int(timer_num);
479 ctrl = apbt_readl(timer_num, APBTMR_N_CONTROL);
480 ctrl &= ~APBTMR_CONTROL_ENABLE;
481 apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
482 break;
483
484 case CLOCK_EVT_MODE_RESUME:
485 apbt_enable_int(timer_num);
486 break;
487 }
488}
489
490static int apbt_next_event(unsigned long delta,
491 struct clock_event_device *evt)
492{
493 unsigned long ctrl;
494 int timer_num;
495
496 struct apbt_dev *adev = EVT_TO_APBT_DEV(evt);
497
498 timer_num = adev->num;
499 /* Disable timer */
500 ctrl = apbt_readl(timer_num, APBTMR_N_CONTROL);
501 ctrl &= ~APBTMR_CONTROL_ENABLE;
502 apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
503 /* write new count */
504 apbt_writel(timer_num, delta, APBTMR_N_LOAD_COUNT);
505 ctrl |= APBTMR_CONTROL_ENABLE;
506 apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
507 return 0;
508}
509
510static cycle_t apbt_read_clocksource(struct clocksource *cs)
511{
512 unsigned long current_count;
513
514 current_count = apbt_readl(phy_cs_timer_id, APBTMR_N_CURRENT_VALUE);
515 return (cycle_t)~current_count;
516}
517
518static int apbt_clocksource_register(void) 270static int apbt_clocksource_register(void)
519{ 271{
520 u64 start, now; 272 u64 start, now;
521 cycle_t t1; 273 cycle_t t1;
522 274
523 /* Start the counter, use timer 2 as source, timer 0/1 for event */ 275 /* Start the counter, use timer 2 as source, timer 0/1 for event */
524 apbt_start_counter(phy_cs_timer_id); 276 dw_apb_clocksource_start(clocksource_apbt);
525 277
526 /* Verify whether apbt counter works */ 278 /* Verify whether apbt counter works */
527 t1 = apbt_read_clocksource(&clocksource_apbt); 279 t1 = dw_apb_clocksource_read(clocksource_apbt);
528 rdtscll(start); 280 rdtscll(start);
529 281
530 /* 282 /*
@@ -539,10 +291,10 @@ static int apbt_clocksource_register(void)
539 } while ((now - start) < 200000UL); 291 } while ((now - start) < 200000UL);
540 292
541 /* APBT is the only always on clocksource, it has to work! */ 293 /* APBT is the only always on clocksource, it has to work! */
542 if (t1 == apbt_read_clocksource(&clocksource_apbt)) 294 if (t1 == dw_apb_clocksource_read(clocksource_apbt))
543 panic("APBT counter not counting. APBT disabled\n"); 295 panic("APBT counter not counting. APBT disabled\n");
544 296
545 clocksource_register_khz(&clocksource_apbt, (u32)apbt_freq*1000); 297 dw_apb_clocksource_register(clocksource_apbt);
546 298
547 return 0; 299 return 0;
548} 300}
@@ -566,10 +318,7 @@ void __init apbt_time_init(void)
566 if (apb_timer_block_enabled) 318 if (apb_timer_block_enabled)
567 return; 319 return;
568 apbt_set_mapping(); 320 apbt_set_mapping();
569 if (apbt_virt_address) { 321 if (!apbt_virt_address)
570 pr_debug("Found APBT version 0x%lx\n",\
571 apbt_readl_reg(APBTMRS_COMP_VERSION));
572 } else
573 goto out_noapbt; 322 goto out_noapbt;
574 /* 323 /*
575 * Read the frequency and check for a sane value, for ESL model 324 * Read the frequency and check for a sane value, for ESL model
@@ -577,7 +326,7 @@ void __init apbt_time_init(void)
577 */ 326 */
578 327
579 if (apbt_freq < APBT_MIN_FREQ || apbt_freq > APBT_MAX_FREQ) { 328 if (apbt_freq < APBT_MIN_FREQ || apbt_freq > APBT_MAX_FREQ) {
580 pr_debug("APBT has invalid freq 0x%llx\n", apbt_freq); 329 pr_debug("APBT has invalid freq 0x%lx\n", apbt_freq);
581 goto out_noapbt; 330 goto out_noapbt;
582 } 331 }
583 if (apbt_clocksource_register()) { 332 if (apbt_clocksource_register()) {
@@ -603,30 +352,20 @@ void __init apbt_time_init(void)
603 } else { 352 } else {
604 percpu_timer = 0; 353 percpu_timer = 0;
605 apbt_num_timers_used = 1; 354 apbt_num_timers_used = 1;
606 adev = &per_cpu(cpu_apbt_dev, 0);
607 adev->flags &= ~APBT_DEV_USED;
608 } 355 }
609 pr_debug("%s: %d APB timers used\n", __func__, apbt_num_timers_used); 356 pr_debug("%s: %d APB timers used\n", __func__, apbt_num_timers_used);
610 357
611 /* here we set up per CPU timer data structure */ 358 /* here we set up per CPU timer data structure */
612 apbt_devs = kzalloc(sizeof(struct apbt_dev) * apbt_num_timers_used,
613 GFP_KERNEL);
614 if (!apbt_devs) {
615 printk(KERN_ERR "Failed to allocate APB timer devices\n");
616 return;
617 }
618 for (i = 0; i < apbt_num_timers_used; i++) { 359 for (i = 0; i < apbt_num_timers_used; i++) {
619 adev = &per_cpu(cpu_apbt_dev, i); 360 adev = &per_cpu(cpu_apbt_dev, i);
620 adev->num = i; 361 adev->num = i;
621 adev->cpu = i; 362 adev->cpu = i;
622 p_mtmr = sfi_get_mtmr(i); 363 p_mtmr = sfi_get_mtmr(i);
623 if (p_mtmr) { 364 if (p_mtmr)
624 adev->tick = p_mtmr->freq_hz;
625 adev->irq = p_mtmr->irq; 365 adev->irq = p_mtmr->irq;
626 } else 366 else
627 printk(KERN_ERR "Failed to get timer for cpu %d\n", i); 367 printk(KERN_ERR "Failed to get timer for cpu %d\n", i);
628 adev->count = 0; 368 snprintf(adev->name, sizeof(adev->name) - 1, "apbt%d", i);
629 sprintf(adev->name, "apbt%d", i);
630 } 369 }
631#endif 370#endif
632 371
@@ -638,17 +377,8 @@ out_noapbt:
638 panic("failed to enable APB timer\n"); 377 panic("failed to enable APB timer\n");
639} 378}
640 379
641static inline void apbt_disable(int n)
642{
643 if (is_apbt_capable()) {
644 unsigned long ctrl = apbt_readl(n, APBTMR_N_CONTROL);
645 ctrl &= ~APBTMR_CONTROL_ENABLE;
646 apbt_writel(n, ctrl, APBTMR_N_CONTROL);
647 }
648}
649
650/* called before apb_timer_enable, use early map */ 380/* called before apb_timer_enable, use early map */
651unsigned long apbt_quick_calibrate() 381unsigned long apbt_quick_calibrate(void)
652{ 382{
653 int i, scale; 383 int i, scale;
654 u64 old, new; 384 u64 old, new;
@@ -657,31 +387,31 @@ unsigned long apbt_quick_calibrate()
657 u32 loop, shift; 387 u32 loop, shift;
658 388
659 apbt_set_mapping(); 389 apbt_set_mapping();
660 apbt_start_counter(phy_cs_timer_id); 390 dw_apb_clocksource_start(clocksource_apbt);
661 391
662 /* check if the timer can count down, otherwise return */ 392 /* check if the timer can count down, otherwise return */
663 old = apbt_read_clocksource(&clocksource_apbt); 393 old = dw_apb_clocksource_read(clocksource_apbt);
664 i = 10000; 394 i = 10000;
665 while (--i) { 395 while (--i) {
666 if (old != apbt_read_clocksource(&clocksource_apbt)) 396 if (old != dw_apb_clocksource_read(clocksource_apbt))
667 break; 397 break;
668 } 398 }
669 if (!i) 399 if (!i)
670 goto failed; 400 goto failed;
671 401
672 /* count 16 ms */ 402 /* count 16 ms */
673 loop = (apbt_freq * 1000) << 4; 403 loop = (apbt_freq / 1000) << 4;
674 404
675 /* restart the timer to ensure it won't get to 0 in the calibration */ 405 /* restart the timer to ensure it won't get to 0 in the calibration */
676 apbt_start_counter(phy_cs_timer_id); 406 dw_apb_clocksource_start(clocksource_apbt);
677 407
678 old = apbt_read_clocksource(&clocksource_apbt); 408 old = dw_apb_clocksource_read(clocksource_apbt);
679 old += loop; 409 old += loop;
680 410
681 t1 = __native_read_tsc(); 411 t1 = __native_read_tsc();
682 412
683 do { 413 do {
684 new = apbt_read_clocksource(&clocksource_apbt); 414 new = dw_apb_clocksource_read(clocksource_apbt);
685 } while (new < old); 415 } while (new < old);
686 416
687 t2 = __native_read_tsc(); 417 t2 = __native_read_tsc();
@@ -693,7 +423,7 @@ unsigned long apbt_quick_calibrate()
693 return 0; 423 return 0;
694 } 424 }
695 scale = (int)div_u64((t2 - t1), loop >> shift); 425 scale = (int)div_u64((t2 - t1), loop >> shift);
696 khz = (scale * apbt_freq * 1000) >> shift; 426 khz = (scale * (apbt_freq / 1000)) >> shift;
697 printk(KERN_INFO "TSC freq calculated by APB timer is %lu khz\n", khz); 427 printk(KERN_INFO "TSC freq calculated by APB timer is %lu khz\n", khz);
698 return khz; 428 return khz;
699failed: 429failed:
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index b9338b8cf420..b24be38c8cf8 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -27,6 +27,7 @@
27#include <linux/syscore_ops.h> 27#include <linux/syscore_ops.h>
28#include <linux/delay.h> 28#include <linux/delay.h>
29#include <linux/timex.h> 29#include <linux/timex.h>
30#include <linux/i8253.h>
30#include <linux/dmar.h> 31#include <linux/dmar.h>
31#include <linux/init.h> 32#include <linux/init.h>
32#include <linux/cpu.h> 33#include <linux/cpu.h>
@@ -39,7 +40,6 @@
39#include <asm/pgalloc.h> 40#include <asm/pgalloc.h>
40#include <asm/atomic.h> 41#include <asm/atomic.h>
41#include <asm/mpspec.h> 42#include <asm/mpspec.h>
42#include <asm/i8253.h>
43#include <asm/i8259.h> 43#include <asm/i8259.h>
44#include <asm/proto.h> 44#include <asm/proto.h>
45#include <asm/apic.h> 45#include <asm/apic.h>
@@ -48,6 +48,7 @@
48#include <asm/hpet.h> 48#include <asm/hpet.h>
49#include <asm/idle.h> 49#include <asm/idle.h>
50#include <asm/mtrr.h> 50#include <asm/mtrr.h>
51#include <asm/time.h>
51#include <asm/smp.h> 52#include <asm/smp.h>
52#include <asm/mce.h> 53#include <asm/mce.h>
53#include <asm/tsc.h> 54#include <asm/tsc.h>
@@ -1429,7 +1430,7 @@ void enable_x2apic(void)
1429 rdmsr(MSR_IA32_APICBASE, msr, msr2); 1430 rdmsr(MSR_IA32_APICBASE, msr, msr2);
1430 if (!(msr & X2APIC_ENABLE)) { 1431 if (!(msr & X2APIC_ENABLE)) {
1431 printk_once(KERN_INFO "Enabling x2apic\n"); 1432 printk_once(KERN_INFO "Enabling x2apic\n");
1432 wrmsr(MSR_IA32_APICBASE, msr | X2APIC_ENABLE, 0); 1433 wrmsr(MSR_IA32_APICBASE, msr | X2APIC_ENABLE, msr2);
1433 } 1434 }
1434} 1435}
1435#endif /* CONFIG_X86_X2APIC */ 1436#endif /* CONFIG_X86_X2APIC */
@@ -1943,10 +1944,28 @@ void disconnect_bsp_APIC(int virt_wire_setup)
1943 1944
1944void __cpuinit generic_processor_info(int apicid, int version) 1945void __cpuinit generic_processor_info(int apicid, int version)
1945{ 1946{
1946 int cpu; 1947 int cpu, max = nr_cpu_ids;
1948 bool boot_cpu_detected = physid_isset(boot_cpu_physical_apicid,
1949 phys_cpu_present_map);
1950
1951 /*
1952 * If boot cpu has not been detected yet, then only allow upto
1953 * nr_cpu_ids - 1 processors and keep one slot free for boot cpu
1954 */
1955 if (!boot_cpu_detected && num_processors >= nr_cpu_ids - 1 &&
1956 apicid != boot_cpu_physical_apicid) {
1957 int thiscpu = max + disabled_cpus - 1;
1958
1959 pr_warning(
1960 "ACPI: NR_CPUS/possible_cpus limit of %i almost"
1961 " reached. Keeping one slot for boot cpu."
1962 " Processor %d/0x%x ignored.\n", max, thiscpu, apicid);
1963
1964 disabled_cpus++;
1965 return;
1966 }
1947 1967
1948 if (num_processors >= nr_cpu_ids) { 1968 if (num_processors >= nr_cpu_ids) {
1949 int max = nr_cpu_ids;
1950 int thiscpu = max + disabled_cpus; 1969 int thiscpu = max + disabled_cpus;
1951 1970
1952 pr_warning( 1971 pr_warning(
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index e5293394b548..8eb863e27ea6 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1295,6 +1295,16 @@ static int setup_ioapic_entry(int apic_id, int irq,
1295 * irq handler will do the explicit EOI to the io-apic. 1295 * irq handler will do the explicit EOI to the io-apic.
1296 */ 1296 */
1297 ir_entry->vector = pin; 1297 ir_entry->vector = pin;
1298
1299 apic_printk(APIC_VERBOSE, KERN_DEBUG "IOAPIC[%d]: "
1300 "Set IRTE entry (P:%d FPD:%d Dst_Mode:%d "
1301 "Redir_hint:%d Trig_Mode:%d Dlvry_Mode:%X "
1302 "Avail:%X Vector:%02X Dest:%08X "
1303 "SID:%04X SQ:%X SVT:%X)\n",
1304 apic_id, irte.present, irte.fpd, irte.dst_mode,
1305 irte.redir_hint, irte.trigger_mode, irte.dlvry_mode,
1306 irte.avail, irte.vector, irte.dest_id,
1307 irte.sid, irte.sq, irte.svt);
1298 } else { 1308 } else {
1299 entry->delivery_mode = apic->irq_delivery_mode; 1309 entry->delivery_mode = apic->irq_delivery_mode;
1300 entry->dest_mode = apic->irq_dest_mode; 1310 entry->dest_mode = apic->irq_dest_mode;
@@ -1337,9 +1347,9 @@ static void setup_ioapic_irq(int apic_id, int pin, unsigned int irq,
1337 1347
1338 apic_printk(APIC_VERBOSE,KERN_DEBUG 1348 apic_printk(APIC_VERBOSE,KERN_DEBUG
1339 "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> " 1349 "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
1340 "IRQ %d Mode:%i Active:%i)\n", 1350 "IRQ %d Mode:%i Active:%i Dest:%d)\n",
1341 apic_id, mpc_ioapic_id(apic_id), pin, cfg->vector, 1351 apic_id, mpc_ioapic_id(apic_id), pin, cfg->vector,
1342 irq, trigger, polarity); 1352 irq, trigger, polarity, dest);
1343 1353
1344 1354
1345 if (setup_ioapic_entry(mpc_ioapic_id(apic_id), irq, &entry, 1355 if (setup_ioapic_entry(mpc_ioapic_id(apic_id), irq, &entry,
@@ -1522,10 +1532,12 @@ __apicdebuginit(void) print_IO_APIC(void)
1522 printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS); 1532 printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS);
1523 1533
1524 printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01); 1534 printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
1525 printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries); 1535 printk(KERN_DEBUG "....... : max redirection entries: %02X\n",
1536 reg_01.bits.entries);
1526 1537
1527 printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ); 1538 printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
1528 printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version); 1539 printk(KERN_DEBUG "....... : IO APIC version: %02X\n",
1540 reg_01.bits.version);
1529 1541
1530 /* 1542 /*
1531 * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02, 1543 * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
@@ -1550,31 +1562,60 @@ __apicdebuginit(void) print_IO_APIC(void)
1550 1562
1551 printk(KERN_DEBUG ".... IRQ redirection table:\n"); 1563 printk(KERN_DEBUG ".... IRQ redirection table:\n");
1552 1564
1553 printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol" 1565 if (intr_remapping_enabled) {
1554 " Stat Dmod Deli Vect:\n"); 1566 printk(KERN_DEBUG " NR Indx Fmt Mask Trig IRR"
1567 " Pol Stat Indx2 Zero Vect:\n");
1568 } else {
1569 printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol"
1570 " Stat Dmod Deli Vect:\n");
1571 }
1555 1572
1556 for (i = 0; i <= reg_01.bits.entries; i++) { 1573 for (i = 0; i <= reg_01.bits.entries; i++) {
1557 struct IO_APIC_route_entry entry; 1574 if (intr_remapping_enabled) {
1558 1575 struct IO_APIC_route_entry entry;
1559 entry = ioapic_read_entry(apic, i); 1576 struct IR_IO_APIC_route_entry *ir_entry;
1560 1577
1561 printk(KERN_DEBUG " %02x %03X ", 1578 entry = ioapic_read_entry(apic, i);
1562 i, 1579 ir_entry = (struct IR_IO_APIC_route_entry *) &entry;
1563 entry.dest 1580 printk(KERN_DEBUG " %02x %04X ",
1564 ); 1581 i,
1582 ir_entry->index
1583 );
1584 printk("%1d %1d %1d %1d %1d "
1585 "%1d %1d %X %02X\n",
1586 ir_entry->format,
1587 ir_entry->mask,
1588 ir_entry->trigger,
1589 ir_entry->irr,
1590 ir_entry->polarity,
1591 ir_entry->delivery_status,
1592 ir_entry->index2,
1593 ir_entry->zero,
1594 ir_entry->vector
1595 );
1596 } else {
1597 struct IO_APIC_route_entry entry;
1565 1598
1566 printk("%1d %1d %1d %1d %1d %1d %1d %02X\n", 1599 entry = ioapic_read_entry(apic, i);
1567 entry.mask, 1600 printk(KERN_DEBUG " %02x %02X ",
1568 entry.trigger, 1601 i,
1569 entry.irr, 1602 entry.dest
1570 entry.polarity, 1603 );
1571 entry.delivery_status, 1604 printk("%1d %1d %1d %1d %1d "
1572 entry.dest_mode, 1605 "%1d %1d %02X\n",
1573 entry.delivery_mode, 1606 entry.mask,
1574 entry.vector 1607 entry.trigger,
1575 ); 1608 entry.irr,
1609 entry.polarity,
1610 entry.delivery_status,
1611 entry.dest_mode,
1612 entry.delivery_mode,
1613 entry.vector
1614 );
1615 }
1576 } 1616 }
1577 } 1617 }
1618
1578 printk(KERN_DEBUG "IRQ to pin mappings:\n"); 1619 printk(KERN_DEBUG "IRQ to pin mappings:\n");
1579 for_each_active_irq(irq) { 1620 for_each_active_irq(irq) {
1580 struct irq_pin_list *entry; 1621 struct irq_pin_list *entry;
@@ -1792,7 +1833,7 @@ __apicdebuginit(int) print_ICs(void)
1792 return 0; 1833 return 0;
1793} 1834}
1794 1835
1795fs_initcall(print_ICs); 1836late_initcall(print_ICs);
1796 1837
1797 1838
1798/* Where if anywhere is the i8259 connect in external int mode */ 1839/* Where if anywhere is the i8259 connect in external int mode */
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 965a7666c283..0371c484bb8a 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -229,11 +229,11 @@
229#include <linux/jiffies.h> 229#include <linux/jiffies.h>
230#include <linux/acpi.h> 230#include <linux/acpi.h>
231#include <linux/syscore_ops.h> 231#include <linux/syscore_ops.h>
232#include <linux/i8253.h>
232 233
233#include <asm/system.h> 234#include <asm/system.h>
234#include <asm/uaccess.h> 235#include <asm/uaccess.h>
235#include <asm/desc.h> 236#include <asm/desc.h>
236#include <asm/i8253.h>
237#include <asm/olpc.h> 237#include <asm/olpc.h>
238#include <asm/paravirt.h> 238#include <asm/paravirt.h>
239#include <asm/reboot.h> 239#include <asm/reboot.h>
@@ -1220,11 +1220,11 @@ static void reinit_timer(void)
1220 1220
1221 raw_spin_lock_irqsave(&i8253_lock, flags); 1221 raw_spin_lock_irqsave(&i8253_lock, flags);
1222 /* set the clock to HZ */ 1222 /* set the clock to HZ */
1223 outb_pit(0x34, PIT_MODE); /* binary, mode 2, LSB/MSB, ch 0 */ 1223 outb_p(0x34, PIT_MODE); /* binary, mode 2, LSB/MSB, ch 0 */
1224 udelay(10); 1224 udelay(10);
1225 outb_pit(LATCH & 0xff, PIT_CH0); /* LSB */ 1225 outb_p(LATCH & 0xff, PIT_CH0); /* LSB */
1226 udelay(10); 1226 udelay(10);
1227 outb_pit(LATCH >> 8, PIT_CH0); /* MSB */ 1227 outb_p(LATCH >> 8, PIT_CH0); /* MSB */
1228 udelay(10); 1228 udelay(10);
1229 raw_spin_unlock_irqrestore(&i8253_lock, flags); 1229 raw_spin_unlock_irqrestore(&i8253_lock, flags);
1230#endif 1230#endif
diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
index c29d631af6fc..395a10e68067 100644
--- a/arch/x86/kernel/asm-offsets_32.c
+++ b/arch/x86/kernel/asm-offsets_32.c
@@ -63,7 +63,6 @@ void foo(void)
63 BLANK(); 63 BLANK();
64 OFFSET(LGUEST_DATA_irq_enabled, lguest_data, irq_enabled); 64 OFFSET(LGUEST_DATA_irq_enabled, lguest_data, irq_enabled);
65 OFFSET(LGUEST_DATA_irq_pending, lguest_data, irq_pending); 65 OFFSET(LGUEST_DATA_irq_pending, lguest_data, irq_pending);
66 OFFSET(LGUEST_DATA_pgdir, lguest_data, pgdir);
67 66
68 BLANK(); 67 BLANK();
69 OFFSET(LGUEST_PAGES_host_gdt_desc, lguest_pages, state.host_gdt_desc); 68 OFFSET(LGUEST_PAGES_host_gdt_desc, lguest_pages, state.host_gdt_desc);
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 525514cf33c3..46674fbb62ba 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -62,6 +62,8 @@ static void __init check_fpu(void)
62 return; 62 return;
63 } 63 }
64 64
65 kernel_fpu_begin();
66
65 /* 67 /*
66 * trap_init() enabled FXSR and company _before_ testing for FP 68 * trap_init() enabled FXSR and company _before_ testing for FP
67 * problems here. 69 * problems here.
@@ -80,6 +82,8 @@ static void __init check_fpu(void)
80 : "=m" (*&fdiv_bug) 82 : "=m" (*&fdiv_bug)
81 : "m" (*&x), "m" (*&y)); 83 : "m" (*&x), "m" (*&y));
82 84
85 kernel_fpu_end();
86
83 boot_cpu_data.fdiv_bug = fdiv_bug; 87 boot_cpu_data.fdiv_bug = fdiv_bug;
84 if (boot_cpu_data.fdiv_bug) 88 if (boot_cpu_data.fdiv_bug)
85 printk(KERN_WARNING "Hmm, FPU with FDIV bug.\n"); 89 printk(KERN_WARNING "Hmm, FPU with FDIV bug.\n");
diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c
index 8095f8611f8a..755f64fb0743 100644
--- a/arch/x86/kernel/cpu/hypervisor.c
+++ b/arch/x86/kernel/cpu/hypervisor.c
@@ -32,11 +32,11 @@
32 */ 32 */
33static const __initconst struct hypervisor_x86 * const hypervisors[] = 33static const __initconst struct hypervisor_x86 * const hypervisors[] =
34{ 34{
35 &x86_hyper_vmware,
36 &x86_hyper_ms_hyperv,
37#ifdef CONFIG_XEN_PVHVM 35#ifdef CONFIG_XEN_PVHVM
38 &x86_hyper_xen_hvm, 36 &x86_hyper_xen_hvm,
39#endif 37#endif
38 &x86_hyper_vmware,
39 &x86_hyper_ms_hyperv,
40}; 40};
41 41
42const struct hypervisor_x86 *x86_hyper; 42const struct hypervisor_x86 *x86_hyper;
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 1edf5ba4fb2b..ed6086eedf1d 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -456,6 +456,24 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
456 456
457 if (cpu_has(c, X86_FEATURE_VMX)) 457 if (cpu_has(c, X86_FEATURE_VMX))
458 detect_vmx_virtcap(c); 458 detect_vmx_virtcap(c);
459
460 /*
461 * Initialize MSR_IA32_ENERGY_PERF_BIAS if BIOS did not.
462 * x86_energy_perf_policy(8) is available to change it at run-time
463 */
464 if (cpu_has(c, X86_FEATURE_EPB)) {
465 u64 epb;
466
467 rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
468 if ((epb & 0xF) == ENERGY_PERF_BIAS_PERFORMANCE) {
469 printk_once(KERN_WARNING "ENERGY_PERF_BIAS:"
470 " Set to 'normal', was 'performance'\n"
471 "ENERGY_PERF_BIAS: View and update with"
472 " x86_energy_perf_policy(8)\n");
473 epb = (epb & ~0xF) | ENERGY_PERF_BIAS_NORMAL;
474 wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
475 }
476 }
459} 477}
460 478
461#ifdef CONFIG_X86_32 479#ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mcheck/mce-severity.c
index 1e8d66c1336a..7395d5f4272d 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-severity.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-severity.c
@@ -43,61 +43,105 @@ static struct severity {
43 unsigned char covered; 43 unsigned char covered;
44 char *msg; 44 char *msg;
45} severities[] = { 45} severities[] = {
46#define KERNEL .context = IN_KERNEL 46#define MCESEV(s, m, c...) { .sev = MCE_ ## s ## _SEVERITY, .msg = m, ## c }
47#define USER .context = IN_USER 47#define KERNEL .context = IN_KERNEL
48#define SER .ser = SER_REQUIRED 48#define USER .context = IN_USER
49#define NOSER .ser = NO_SER 49#define SER .ser = SER_REQUIRED
50#define SEV(s) .sev = MCE_ ## s ## _SEVERITY 50#define NOSER .ser = NO_SER
51#define BITCLR(x, s, m, r...) { .mask = x, .result = 0, SEV(s), .msg = m, ## r } 51#define BITCLR(x) .mask = x, .result = 0
52#define BITSET(x, s, m, r...) { .mask = x, .result = x, SEV(s), .msg = m, ## r } 52#define BITSET(x) .mask = x, .result = x
53#define MCGMASK(x, res, s, m, r...) \ 53#define MCGMASK(x, y) .mcgmask = x, .mcgres = y
54 { .mcgmask = x, .mcgres = res, SEV(s), .msg = m, ## r } 54#define MASK(x, y) .mask = x, .result = y
55#define MASK(x, y, s, m, r...) \
56 { .mask = x, .result = y, SEV(s), .msg = m, ## r }
57#define MCI_UC_S (MCI_STATUS_UC|MCI_STATUS_S) 55#define MCI_UC_S (MCI_STATUS_UC|MCI_STATUS_S)
58#define MCI_UC_SAR (MCI_STATUS_UC|MCI_STATUS_S|MCI_STATUS_AR) 56#define MCI_UC_SAR (MCI_STATUS_UC|MCI_STATUS_S|MCI_STATUS_AR)
59#define MCACOD 0xffff 57#define MCACOD 0xffff
60 58
61 BITCLR(MCI_STATUS_VAL, NO, "Invalid"), 59 MCESEV(
62 BITCLR(MCI_STATUS_EN, NO, "Not enabled"), 60 NO, "Invalid",
63 BITSET(MCI_STATUS_PCC, PANIC, "Processor context corrupt"), 61 BITCLR(MCI_STATUS_VAL)
62 ),
63 MCESEV(
64 NO, "Not enabled",
65 BITCLR(MCI_STATUS_EN)
66 ),
67 MCESEV(
68 PANIC, "Processor context corrupt",
69 BITSET(MCI_STATUS_PCC)
70 ),
64 /* When MCIP is not set something is very confused */ 71 /* When MCIP is not set something is very confused */
65 MCGMASK(MCG_STATUS_MCIP, 0, PANIC, "MCIP not set in MCA handler"), 72 MCESEV(
73 PANIC, "MCIP not set in MCA handler",
74 MCGMASK(MCG_STATUS_MCIP, 0)
75 ),
66 /* Neither return not error IP -- no chance to recover -> PANIC */ 76 /* Neither return not error IP -- no chance to recover -> PANIC */
67 MCGMASK(MCG_STATUS_RIPV|MCG_STATUS_EIPV, 0, PANIC, 77 MCESEV(
68 "Neither restart nor error IP"), 78 PANIC, "Neither restart nor error IP",
69 MCGMASK(MCG_STATUS_RIPV, 0, PANIC, "In kernel and no restart IP", 79 MCGMASK(MCG_STATUS_RIPV|MCG_STATUS_EIPV, 0)
70 KERNEL), 80 ),
71 BITCLR(MCI_STATUS_UC, KEEP, "Corrected error", NOSER), 81 MCESEV(
72 MASK(MCI_STATUS_OVER|MCI_STATUS_UC|MCI_STATUS_EN, MCI_STATUS_UC, SOME, 82 PANIC, "In kernel and no restart IP",
73 "Spurious not enabled", SER), 83 KERNEL, MCGMASK(MCG_STATUS_RIPV, 0)
84 ),
85 MCESEV(
86 KEEP, "Corrected error",
87 NOSER, BITCLR(MCI_STATUS_UC)
88 ),
74 89
75 /* ignore OVER for UCNA */ 90 /* ignore OVER for UCNA */
76 MASK(MCI_UC_SAR, MCI_STATUS_UC, KEEP, 91 MCESEV(
77 "Uncorrected no action required", SER), 92 KEEP, "Uncorrected no action required",
78 MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_STATUS_UC|MCI_STATUS_AR, PANIC, 93 SER, MASK(MCI_UC_SAR, MCI_STATUS_UC)
79 "Illegal combination (UCNA with AR=1)", SER), 94 ),
80 MASK(MCI_STATUS_S, 0, KEEP, "Non signalled machine check", SER), 95 MCESEV(
96 PANIC, "Illegal combination (UCNA with AR=1)",
97 SER,
98 MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_STATUS_UC|MCI_STATUS_AR)
99 ),
100 MCESEV(
101 KEEP, "Non signalled machine check",
102 SER, BITCLR(MCI_STATUS_S)
103 ),
81 104
82 /* AR add known MCACODs here */ 105 /* AR add known MCACODs here */
83 MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_STATUS_OVER|MCI_UC_SAR, PANIC, 106 MCESEV(
84 "Action required with lost events", SER), 107 PANIC, "Action required with lost events",
85 MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCACOD, MCI_UC_SAR, PANIC, 108 SER, BITSET(MCI_STATUS_OVER|MCI_UC_SAR)
86 "Action required; unknown MCACOD", SER), 109 ),
110 MCESEV(
111 PANIC, "Action required: unknown MCACOD",
112 SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_UC_SAR)
113 ),
87 114
88 /* known AO MCACODs: */ 115 /* known AO MCACODs: */
89 MASK(MCI_UC_SAR|MCI_STATUS_OVER|0xfff0, MCI_UC_S|0xc0, AO, 116 MCESEV(
90 "Action optional: memory scrubbing error", SER), 117 AO, "Action optional: memory scrubbing error",
91 MASK(MCI_UC_SAR|MCI_STATUS_OVER|MCACOD, MCI_UC_S|0x17a, AO, 118 SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|0xfff0, MCI_UC_S|0x00c0)
92 "Action optional: last level cache writeback error", SER), 119 ),
93 120 MCESEV(
94 MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_UC_S, SOME, 121 AO, "Action optional: last level cache writeback error",
95 "Action optional unknown MCACOD", SER), 122 SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCACOD, MCI_UC_S|0x017a)
96 MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_UC_S|MCI_STATUS_OVER, SOME, 123 ),
97 "Action optional with lost events", SER), 124 MCESEV(
98 BITSET(MCI_STATUS_UC|MCI_STATUS_OVER, PANIC, "Overflowed uncorrected"), 125 SOME, "Action optional: unknown MCACOD",
99 BITSET(MCI_STATUS_UC, UC, "Uncorrected"), 126 SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_UC_S)
100 BITSET(0, SOME, "No match") /* always matches. keep at end */ 127 ),
128 MCESEV(
129 SOME, "Action optional with lost events",
130 SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_STATUS_OVER|MCI_UC_S)
131 ),
132
133 MCESEV(
134 PANIC, "Overflowed uncorrected",
135 BITSET(MCI_STATUS_OVER|MCI_STATUS_UC)
136 ),
137 MCESEV(
138 UC, "Uncorrected",
139 BITSET(MCI_STATUS_UC)
140 ),
141 MCESEV(
142 SOME, "No match",
143 BITSET(0)
144 ) /* always matches. keep at end */
101}; 145};
102 146
103/* 147/*
@@ -112,15 +156,15 @@ static int error_context(struct mce *m)
112 return IN_KERNEL; 156 return IN_KERNEL;
113} 157}
114 158
115int mce_severity(struct mce *a, int tolerant, char **msg) 159int mce_severity(struct mce *m, int tolerant, char **msg)
116{ 160{
117 enum context ctx = error_context(a); 161 enum context ctx = error_context(m);
118 struct severity *s; 162 struct severity *s;
119 163
120 for (s = severities;; s++) { 164 for (s = severities;; s++) {
121 if ((a->status & s->mask) != s->result) 165 if ((m->status & s->mask) != s->result)
122 continue; 166 continue;
123 if ((a->mcgstatus & s->mcgmask) != s->mcgres) 167 if ((m->mcgstatus & s->mcgmask) != s->mcgres)
124 continue; 168 continue;
125 if (s->ser == SER_REQUIRED && !mce_ser) 169 if (s->ser == SER_REQUIRED && !mce_ser)
126 continue; 170 continue;
@@ -197,15 +241,15 @@ static const struct file_operations severities_coverage_fops = {
197 241
198static int __init severities_debugfs_init(void) 242static int __init severities_debugfs_init(void)
199{ 243{
200 struct dentry *dmce = NULL, *fseverities_coverage = NULL; 244 struct dentry *dmce, *fsev;
201 245
202 dmce = mce_get_debugfs_dir(); 246 dmce = mce_get_debugfs_dir();
203 if (dmce == NULL) 247 if (!dmce)
204 goto err_out; 248 goto err_out;
205 fseverities_coverage = debugfs_create_file("severities-coverage", 249
206 0444, dmce, NULL, 250 fsev = debugfs_create_file("severities-coverage", 0444, dmce, NULL,
207 &severities_coverage_fops); 251 &severities_coverage_fops);
208 if (fseverities_coverage == NULL) 252 if (!fsev)
209 goto err_out; 253 goto err_out;
210 254
211 return 0; 255 return 0;
@@ -214,4 +258,4 @@ err_out:
214 return -ENOMEM; 258 return -ENOMEM;
215} 259}
216late_initcall(severities_debugfs_init); 260late_initcall(severities_debugfs_init);
217#endif 261#endif /* CONFIG_DEBUG_FS */
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index ff1ae9b6464d..08363b042122 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -10,7 +10,6 @@
10#include <linux/thread_info.h> 10#include <linux/thread_info.h>
11#include <linux/capability.h> 11#include <linux/capability.h>
12#include <linux/miscdevice.h> 12#include <linux/miscdevice.h>
13#include <linux/interrupt.h>
14#include <linux/ratelimit.h> 13#include <linux/ratelimit.h>
15#include <linux/kallsyms.h> 14#include <linux/kallsyms.h>
16#include <linux/rcupdate.h> 15#include <linux/rcupdate.h>
@@ -38,23 +37,20 @@
38#include <linux/mm.h> 37#include <linux/mm.h>
39#include <linux/debugfs.h> 38#include <linux/debugfs.h>
40#include <linux/edac_mce.h> 39#include <linux/edac_mce.h>
40#include <linux/irq_work.h>
41 41
42#include <asm/processor.h> 42#include <asm/processor.h>
43#include <asm/hw_irq.h>
44#include <asm/apic.h>
45#include <asm/idle.h>
46#include <asm/ipi.h>
47#include <asm/mce.h> 43#include <asm/mce.h>
48#include <asm/msr.h> 44#include <asm/msr.h>
49 45
50#include "mce-internal.h" 46#include "mce-internal.h"
51 47
52static DEFINE_MUTEX(mce_read_mutex); 48static DEFINE_MUTEX(mce_chrdev_read_mutex);
53 49
54#define rcu_dereference_check_mce(p) \ 50#define rcu_dereference_check_mce(p) \
55 rcu_dereference_index_check((p), \ 51 rcu_dereference_index_check((p), \
56 rcu_read_lock_sched_held() || \ 52 rcu_read_lock_sched_held() || \
57 lockdep_is_held(&mce_read_mutex)) 53 lockdep_is_held(&mce_chrdev_read_mutex))
58 54
59#define CREATE_TRACE_POINTS 55#define CREATE_TRACE_POINTS
60#include <trace/events/mce.h> 56#include <trace/events/mce.h>
@@ -94,7 +90,8 @@ static unsigned long mce_need_notify;
94static char mce_helper[128]; 90static char mce_helper[128];
95static char *mce_helper_argv[2] = { mce_helper, NULL }; 91static char *mce_helper_argv[2] = { mce_helper, NULL };
96 92
97static DECLARE_WAIT_QUEUE_HEAD(mce_wait); 93static DECLARE_WAIT_QUEUE_HEAD(mce_chrdev_wait);
94
98static DEFINE_PER_CPU(struct mce, mces_seen); 95static DEFINE_PER_CPU(struct mce, mces_seen);
99static int cpu_missing; 96static int cpu_missing;
100 97
@@ -373,6 +370,31 @@ static void mce_wrmsrl(u32 msr, u64 v)
373} 370}
374 371
375/* 372/*
373 * Collect all global (w.r.t. this processor) status about this machine
374 * check into our "mce" struct so that we can use it later to assess
375 * the severity of the problem as we read per-bank specific details.
376 */
377static inline void mce_gather_info(struct mce *m, struct pt_regs *regs)
378{
379 mce_setup(m);
380
381 m->mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
382 if (regs) {
383 /*
384 * Get the address of the instruction at the time of
385 * the machine check error.
386 */
387 if (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) {
388 m->ip = regs->ip;
389 m->cs = regs->cs;
390 }
391 /* Use accurate RIP reporting if available. */
392 if (rip_msr)
393 m->ip = mce_rdmsrl(rip_msr);
394 }
395}
396
397/*
376 * Simple lockless ring to communicate PFNs from the exception handler with the 398 * Simple lockless ring to communicate PFNs from the exception handler with the
377 * process context work function. This is vastly simplified because there's 399 * process context work function. This is vastly simplified because there's
378 * only a single reader and a single writer. 400 * only a single reader and a single writer.
@@ -443,40 +465,13 @@ static void mce_schedule_work(void)
443 } 465 }
444} 466}
445 467
446/* 468DEFINE_PER_CPU(struct irq_work, mce_irq_work);
447 * Get the address of the instruction at the time of the machine check
448 * error.
449 */
450static inline void mce_get_rip(struct mce *m, struct pt_regs *regs)
451{
452
453 if (regs && (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV))) {
454 m->ip = regs->ip;
455 m->cs = regs->cs;
456 } else {
457 m->ip = 0;
458 m->cs = 0;
459 }
460 if (rip_msr)
461 m->ip = mce_rdmsrl(rip_msr);
462}
463 469
464#ifdef CONFIG_X86_LOCAL_APIC 470static void mce_irq_work_cb(struct irq_work *entry)
465/*
466 * Called after interrupts have been reenabled again
467 * when a MCE happened during an interrupts off region
468 * in the kernel.
469 */
470asmlinkage void smp_mce_self_interrupt(struct pt_regs *regs)
471{ 471{
472 ack_APIC_irq();
473 exit_idle();
474 irq_enter();
475 mce_notify_irq(); 472 mce_notify_irq();
476 mce_schedule_work(); 473 mce_schedule_work();
477 irq_exit();
478} 474}
479#endif
480 475
481static void mce_report_event(struct pt_regs *regs) 476static void mce_report_event(struct pt_regs *regs)
482{ 477{
@@ -492,29 +487,7 @@ static void mce_report_event(struct pt_regs *regs)
492 return; 487 return;
493 } 488 }
494 489
495#ifdef CONFIG_X86_LOCAL_APIC 490 irq_work_queue(&__get_cpu_var(mce_irq_work));
496 /*
497 * Without APIC do not notify. The event will be picked
498 * up eventually.
499 */
500 if (!cpu_has_apic)
501 return;
502
503 /*
504 * When interrupts are disabled we cannot use
505 * kernel services safely. Trigger an self interrupt
506 * through the APIC to instead do the notification
507 * after interrupts are reenabled again.
508 */
509 apic->send_IPI_self(MCE_SELF_VECTOR);
510
511 /*
512 * Wait for idle afterwards again so that we don't leave the
513 * APIC in a non idle state because the normal APIC writes
514 * cannot exclude us.
515 */
516 apic_wait_icr_idle();
517#endif
518} 491}
519 492
520DEFINE_PER_CPU(unsigned, mce_poll_count); 493DEFINE_PER_CPU(unsigned, mce_poll_count);
@@ -541,9 +514,8 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
541 514
542 percpu_inc(mce_poll_count); 515 percpu_inc(mce_poll_count);
543 516
544 mce_setup(&m); 517 mce_gather_info(&m, NULL);
545 518
546 m.mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
547 for (i = 0; i < banks; i++) { 519 for (i = 0; i < banks; i++) {
548 if (!mce_banks[i].ctl || !test_bit(i, *b)) 520 if (!mce_banks[i].ctl || !test_bit(i, *b))
549 continue; 521 continue;
@@ -879,9 +851,9 @@ static int mce_usable_address(struct mce *m)
879{ 851{
880 if (!(m->status & MCI_STATUS_MISCV) || !(m->status & MCI_STATUS_ADDRV)) 852 if (!(m->status & MCI_STATUS_MISCV) || !(m->status & MCI_STATUS_ADDRV))
881 return 0; 853 return 0;
882 if ((m->misc & 0x3f) > PAGE_SHIFT) 854 if (MCI_MISC_ADDR_LSB(m->misc) > PAGE_SHIFT)
883 return 0; 855 return 0;
884 if (((m->misc >> 6) & 7) != MCM_ADDR_PHYS) 856 if (MCI_MISC_ADDR_MODE(m->misc) != MCI_MISC_ADDR_PHYS)
885 return 0; 857 return 0;
886 return 1; 858 return 1;
887} 859}
@@ -942,9 +914,8 @@ void do_machine_check(struct pt_regs *regs, long error_code)
942 if (!banks) 914 if (!banks)
943 goto out; 915 goto out;
944 916
945 mce_setup(&m); 917 mce_gather_info(&m, regs);
946 918
947 m.mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
948 final = &__get_cpu_var(mces_seen); 919 final = &__get_cpu_var(mces_seen);
949 *final = m; 920 *final = m;
950 921
@@ -1028,7 +999,6 @@ void do_machine_check(struct pt_regs *regs, long error_code)
1028 if (severity == MCE_AO_SEVERITY && mce_usable_address(&m)) 999 if (severity == MCE_AO_SEVERITY && mce_usable_address(&m))
1029 mce_ring_add(m.addr >> PAGE_SHIFT); 1000 mce_ring_add(m.addr >> PAGE_SHIFT);
1030 1001
1031 mce_get_rip(&m, regs);
1032 mce_log(&m); 1002 mce_log(&m);
1033 1003
1034 if (severity > worst) { 1004 if (severity > worst) {
@@ -1190,7 +1160,8 @@ int mce_notify_irq(void)
1190 clear_thread_flag(TIF_MCE_NOTIFY); 1160 clear_thread_flag(TIF_MCE_NOTIFY);
1191 1161
1192 if (test_and_clear_bit(0, &mce_need_notify)) { 1162 if (test_and_clear_bit(0, &mce_need_notify)) {
1193 wake_up_interruptible(&mce_wait); 1163 /* wake processes polling /dev/mcelog */
1164 wake_up_interruptible(&mce_chrdev_wait);
1194 1165
1195 /* 1166 /*
1196 * There is no risk of missing notifications because 1167 * There is no risk of missing notifications because
@@ -1363,18 +1334,23 @@ static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
1363 return 0; 1334 return 0;
1364} 1335}
1365 1336
1366static void __cpuinit __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c) 1337static int __cpuinit __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
1367{ 1338{
1368 if (c->x86 != 5) 1339 if (c->x86 != 5)
1369 return; 1340 return 0;
1341
1370 switch (c->x86_vendor) { 1342 switch (c->x86_vendor) {
1371 case X86_VENDOR_INTEL: 1343 case X86_VENDOR_INTEL:
1372 intel_p5_mcheck_init(c); 1344 intel_p5_mcheck_init(c);
1345 return 1;
1373 break; 1346 break;
1374 case X86_VENDOR_CENTAUR: 1347 case X86_VENDOR_CENTAUR:
1375 winchip_mcheck_init(c); 1348 winchip_mcheck_init(c);
1349 return 1;
1376 break; 1350 break;
1377 } 1351 }
1352
1353 return 0;
1378} 1354}
1379 1355
1380static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c) 1356static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
@@ -1428,7 +1404,8 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
1428 if (mce_disabled) 1404 if (mce_disabled)
1429 return; 1405 return;
1430 1406
1431 __mcheck_cpu_ancient_init(c); 1407 if (__mcheck_cpu_ancient_init(c))
1408 return;
1432 1409
1433 if (!mce_available(c)) 1410 if (!mce_available(c))
1434 return; 1411 return;
@@ -1444,44 +1421,45 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
1444 __mcheck_cpu_init_vendor(c); 1421 __mcheck_cpu_init_vendor(c);
1445 __mcheck_cpu_init_timer(); 1422 __mcheck_cpu_init_timer();
1446 INIT_WORK(&__get_cpu_var(mce_work), mce_process_work); 1423 INIT_WORK(&__get_cpu_var(mce_work), mce_process_work);
1447 1424 init_irq_work(&__get_cpu_var(mce_irq_work), &mce_irq_work_cb);
1448} 1425}
1449 1426
1450/* 1427/*
1451 * Character device to read and clear the MCE log. 1428 * mce_chrdev: Character device /dev/mcelog to read and clear the MCE log.
1452 */ 1429 */
1453 1430
1454static DEFINE_SPINLOCK(mce_state_lock); 1431static DEFINE_SPINLOCK(mce_chrdev_state_lock);
1455static int open_count; /* #times opened */ 1432static int mce_chrdev_open_count; /* #times opened */
1456static int open_exclu; /* already open exclusive? */ 1433static int mce_chrdev_open_exclu; /* already open exclusive? */
1457 1434
1458static int mce_open(struct inode *inode, struct file *file) 1435static int mce_chrdev_open(struct inode *inode, struct file *file)
1459{ 1436{
1460 spin_lock(&mce_state_lock); 1437 spin_lock(&mce_chrdev_state_lock);
1461 1438
1462 if (open_exclu || (open_count && (file->f_flags & O_EXCL))) { 1439 if (mce_chrdev_open_exclu ||
1463 spin_unlock(&mce_state_lock); 1440 (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
1441 spin_unlock(&mce_chrdev_state_lock);
1464 1442
1465 return -EBUSY; 1443 return -EBUSY;
1466 } 1444 }
1467 1445
1468 if (file->f_flags & O_EXCL) 1446 if (file->f_flags & O_EXCL)
1469 open_exclu = 1; 1447 mce_chrdev_open_exclu = 1;
1470 open_count++; 1448 mce_chrdev_open_count++;
1471 1449
1472 spin_unlock(&mce_state_lock); 1450 spin_unlock(&mce_chrdev_state_lock);
1473 1451
1474 return nonseekable_open(inode, file); 1452 return nonseekable_open(inode, file);
1475} 1453}
1476 1454
1477static int mce_release(struct inode *inode, struct file *file) 1455static int mce_chrdev_release(struct inode *inode, struct file *file)
1478{ 1456{
1479 spin_lock(&mce_state_lock); 1457 spin_lock(&mce_chrdev_state_lock);
1480 1458
1481 open_count--; 1459 mce_chrdev_open_count--;
1482 open_exclu = 0; 1460 mce_chrdev_open_exclu = 0;
1483 1461
1484 spin_unlock(&mce_state_lock); 1462 spin_unlock(&mce_chrdev_state_lock);
1485 1463
1486 return 0; 1464 return 0;
1487} 1465}
@@ -1530,8 +1508,8 @@ static int __mce_read_apei(char __user **ubuf, size_t usize)
1530 return 0; 1508 return 0;
1531} 1509}
1532 1510
1533static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize, 1511static ssize_t mce_chrdev_read(struct file *filp, char __user *ubuf,
1534 loff_t *off) 1512 size_t usize, loff_t *off)
1535{ 1513{
1536 char __user *buf = ubuf; 1514 char __user *buf = ubuf;
1537 unsigned long *cpu_tsc; 1515 unsigned long *cpu_tsc;
@@ -1542,7 +1520,7 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
1542 if (!cpu_tsc) 1520 if (!cpu_tsc)
1543 return -ENOMEM; 1521 return -ENOMEM;
1544 1522
1545 mutex_lock(&mce_read_mutex); 1523 mutex_lock(&mce_chrdev_read_mutex);
1546 1524
1547 if (!mce_apei_read_done) { 1525 if (!mce_apei_read_done) {
1548 err = __mce_read_apei(&buf, usize); 1526 err = __mce_read_apei(&buf, usize);
@@ -1562,19 +1540,18 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
1562 do { 1540 do {
1563 for (i = prev; i < next; i++) { 1541 for (i = prev; i < next; i++) {
1564 unsigned long start = jiffies; 1542 unsigned long start = jiffies;
1543 struct mce *m = &mcelog.entry[i];
1565 1544
1566 while (!mcelog.entry[i].finished) { 1545 while (!m->finished) {
1567 if (time_after_eq(jiffies, start + 2)) { 1546 if (time_after_eq(jiffies, start + 2)) {
1568 memset(mcelog.entry + i, 0, 1547 memset(m, 0, sizeof(*m));
1569 sizeof(struct mce));
1570 goto timeout; 1548 goto timeout;
1571 } 1549 }
1572 cpu_relax(); 1550 cpu_relax();
1573 } 1551 }
1574 smp_rmb(); 1552 smp_rmb();
1575 err |= copy_to_user(buf, mcelog.entry + i, 1553 err |= copy_to_user(buf, m, sizeof(*m));
1576 sizeof(struct mce)); 1554 buf += sizeof(*m);
1577 buf += sizeof(struct mce);
1578timeout: 1555timeout:
1579 ; 1556 ;
1580 } 1557 }
@@ -1594,13 +1571,13 @@ timeout:
1594 on_each_cpu(collect_tscs, cpu_tsc, 1); 1571 on_each_cpu(collect_tscs, cpu_tsc, 1);
1595 1572
1596 for (i = next; i < MCE_LOG_LEN; i++) { 1573 for (i = next; i < MCE_LOG_LEN; i++) {
1597 if (mcelog.entry[i].finished && 1574 struct mce *m = &mcelog.entry[i];
1598 mcelog.entry[i].tsc < cpu_tsc[mcelog.entry[i].cpu]) { 1575
1599 err |= copy_to_user(buf, mcelog.entry+i, 1576 if (m->finished && m->tsc < cpu_tsc[m->cpu]) {
1600 sizeof(struct mce)); 1577 err |= copy_to_user(buf, m, sizeof(*m));
1601 smp_rmb(); 1578 smp_rmb();
1602 buf += sizeof(struct mce); 1579 buf += sizeof(*m);
1603 memset(&mcelog.entry[i], 0, sizeof(struct mce)); 1580 memset(m, 0, sizeof(*m));
1604 } 1581 }
1605 } 1582 }
1606 1583
@@ -1608,15 +1585,15 @@ timeout:
1608 err = -EFAULT; 1585 err = -EFAULT;
1609 1586
1610out: 1587out:
1611 mutex_unlock(&mce_read_mutex); 1588 mutex_unlock(&mce_chrdev_read_mutex);
1612 kfree(cpu_tsc); 1589 kfree(cpu_tsc);
1613 1590
1614 return err ? err : buf - ubuf; 1591 return err ? err : buf - ubuf;
1615} 1592}
1616 1593
1617static unsigned int mce_poll(struct file *file, poll_table *wait) 1594static unsigned int mce_chrdev_poll(struct file *file, poll_table *wait)
1618{ 1595{
1619 poll_wait(file, &mce_wait, wait); 1596 poll_wait(file, &mce_chrdev_wait, wait);
1620 if (rcu_access_index(mcelog.next)) 1597 if (rcu_access_index(mcelog.next))
1621 return POLLIN | POLLRDNORM; 1598 return POLLIN | POLLRDNORM;
1622 if (!mce_apei_read_done && apei_check_mce()) 1599 if (!mce_apei_read_done && apei_check_mce())
@@ -1624,7 +1601,8 @@ static unsigned int mce_poll(struct file *file, poll_table *wait)
1624 return 0; 1601 return 0;
1625} 1602}
1626 1603
1627static long mce_ioctl(struct file *f, unsigned int cmd, unsigned long arg) 1604static long mce_chrdev_ioctl(struct file *f, unsigned int cmd,
1605 unsigned long arg)
1628{ 1606{
1629 int __user *p = (int __user *)arg; 1607 int __user *p = (int __user *)arg;
1630 1608
@@ -1652,16 +1630,16 @@ static long mce_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
1652 1630
1653/* Modified in mce-inject.c, so not static or const */ 1631/* Modified in mce-inject.c, so not static or const */
1654struct file_operations mce_chrdev_ops = { 1632struct file_operations mce_chrdev_ops = {
1655 .open = mce_open, 1633 .open = mce_chrdev_open,
1656 .release = mce_release, 1634 .release = mce_chrdev_release,
1657 .read = mce_read, 1635 .read = mce_chrdev_read,
1658 .poll = mce_poll, 1636 .poll = mce_chrdev_poll,
1659 .unlocked_ioctl = mce_ioctl, 1637 .unlocked_ioctl = mce_chrdev_ioctl,
1660 .llseek = no_llseek, 1638 .llseek = no_llseek,
1661}; 1639};
1662EXPORT_SYMBOL_GPL(mce_chrdev_ops); 1640EXPORT_SYMBOL_GPL(mce_chrdev_ops);
1663 1641
1664static struct miscdevice mce_log_device = { 1642static struct miscdevice mce_chrdev_device = {
1665 MISC_MCELOG_MINOR, 1643 MISC_MCELOG_MINOR,
1666 "mcelog", 1644 "mcelog",
1667 &mce_chrdev_ops, 1645 &mce_chrdev_ops,
@@ -1719,7 +1697,7 @@ int __init mcheck_init(void)
1719} 1697}
1720 1698
1721/* 1699/*
1722 * Sysfs support 1700 * mce_syscore: PM support
1723 */ 1701 */
1724 1702
1725/* 1703/*
@@ -1739,12 +1717,12 @@ static int mce_disable_error_reporting(void)
1739 return 0; 1717 return 0;
1740} 1718}
1741 1719
1742static int mce_suspend(void) 1720static int mce_syscore_suspend(void)
1743{ 1721{
1744 return mce_disable_error_reporting(); 1722 return mce_disable_error_reporting();
1745} 1723}
1746 1724
1747static void mce_shutdown(void) 1725static void mce_syscore_shutdown(void)
1748{ 1726{
1749 mce_disable_error_reporting(); 1727 mce_disable_error_reporting();
1750} 1728}
@@ -1754,18 +1732,22 @@ static void mce_shutdown(void)
1754 * Only one CPU is active at this time, the others get re-added later using 1732 * Only one CPU is active at this time, the others get re-added later using
1755 * CPU hotplug: 1733 * CPU hotplug:
1756 */ 1734 */
1757static void mce_resume(void) 1735static void mce_syscore_resume(void)
1758{ 1736{
1759 __mcheck_cpu_init_generic(); 1737 __mcheck_cpu_init_generic();
1760 __mcheck_cpu_init_vendor(__this_cpu_ptr(&cpu_info)); 1738 __mcheck_cpu_init_vendor(__this_cpu_ptr(&cpu_info));
1761} 1739}
1762 1740
1763static struct syscore_ops mce_syscore_ops = { 1741static struct syscore_ops mce_syscore_ops = {
1764 .suspend = mce_suspend, 1742 .suspend = mce_syscore_suspend,
1765 .shutdown = mce_shutdown, 1743 .shutdown = mce_syscore_shutdown,
1766 .resume = mce_resume, 1744 .resume = mce_syscore_resume,
1767}; 1745};
1768 1746
1747/*
1748 * mce_sysdev: Sysfs support
1749 */
1750
1769static void mce_cpu_restart(void *data) 1751static void mce_cpu_restart(void *data)
1770{ 1752{
1771 del_timer_sync(&__get_cpu_var(mce_timer)); 1753 del_timer_sync(&__get_cpu_var(mce_timer));
@@ -1801,11 +1783,11 @@ static void mce_enable_ce(void *all)
1801 __mcheck_cpu_init_timer(); 1783 __mcheck_cpu_init_timer();
1802} 1784}
1803 1785
1804static struct sysdev_class mce_sysclass = { 1786static struct sysdev_class mce_sysdev_class = {
1805 .name = "machinecheck", 1787 .name = "machinecheck",
1806}; 1788};
1807 1789
1808DEFINE_PER_CPU(struct sys_device, mce_dev); 1790DEFINE_PER_CPU(struct sys_device, mce_sysdev);
1809 1791
1810__cpuinitdata 1792__cpuinitdata
1811void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu); 1793void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);
@@ -1934,7 +1916,7 @@ static struct sysdev_ext_attribute attr_cmci_disabled = {
1934 &mce_cmci_disabled 1916 &mce_cmci_disabled
1935}; 1917};
1936 1918
1937static struct sysdev_attribute *mce_attrs[] = { 1919static struct sysdev_attribute *mce_sysdev_attrs[] = {
1938 &attr_tolerant.attr, 1920 &attr_tolerant.attr,
1939 &attr_check_interval.attr, 1921 &attr_check_interval.attr,
1940 &attr_trigger, 1922 &attr_trigger,
@@ -1945,66 +1927,67 @@ static struct sysdev_attribute *mce_attrs[] = {
1945 NULL 1927 NULL
1946}; 1928};
1947 1929
1948static cpumask_var_t mce_dev_initialized; 1930static cpumask_var_t mce_sysdev_initialized;
1949 1931
1950/* Per cpu sysdev init. All of the cpus still share the same ctrl bank: */ 1932/* Per cpu sysdev init. All of the cpus still share the same ctrl bank: */
1951static __cpuinit int mce_create_device(unsigned int cpu) 1933static __cpuinit int mce_sysdev_create(unsigned int cpu)
1952{ 1934{
1935 struct sys_device *sysdev = &per_cpu(mce_sysdev, cpu);
1953 int err; 1936 int err;
1954 int i, j; 1937 int i, j;
1955 1938
1956 if (!mce_available(&boot_cpu_data)) 1939 if (!mce_available(&boot_cpu_data))
1957 return -EIO; 1940 return -EIO;
1958 1941
1959 memset(&per_cpu(mce_dev, cpu).kobj, 0, sizeof(struct kobject)); 1942 memset(&sysdev->kobj, 0, sizeof(struct kobject));
1960 per_cpu(mce_dev, cpu).id = cpu; 1943 sysdev->id = cpu;
1961 per_cpu(mce_dev, cpu).cls = &mce_sysclass; 1944 sysdev->cls = &mce_sysdev_class;
1962 1945
1963 err = sysdev_register(&per_cpu(mce_dev, cpu)); 1946 err = sysdev_register(sysdev);
1964 if (err) 1947 if (err)
1965 return err; 1948 return err;
1966 1949
1967 for (i = 0; mce_attrs[i]; i++) { 1950 for (i = 0; mce_sysdev_attrs[i]; i++) {
1968 err = sysdev_create_file(&per_cpu(mce_dev, cpu), mce_attrs[i]); 1951 err = sysdev_create_file(sysdev, mce_sysdev_attrs[i]);
1969 if (err) 1952 if (err)
1970 goto error; 1953 goto error;
1971 } 1954 }
1972 for (j = 0; j < banks; j++) { 1955 for (j = 0; j < banks; j++) {
1973 err = sysdev_create_file(&per_cpu(mce_dev, cpu), 1956 err = sysdev_create_file(sysdev, &mce_banks[j].attr);
1974 &mce_banks[j].attr);
1975 if (err) 1957 if (err)
1976 goto error2; 1958 goto error2;
1977 } 1959 }
1978 cpumask_set_cpu(cpu, mce_dev_initialized); 1960 cpumask_set_cpu(cpu, mce_sysdev_initialized);
1979 1961
1980 return 0; 1962 return 0;
1981error2: 1963error2:
1982 while (--j >= 0) 1964 while (--j >= 0)
1983 sysdev_remove_file(&per_cpu(mce_dev, cpu), &mce_banks[j].attr); 1965 sysdev_remove_file(sysdev, &mce_banks[j].attr);
1984error: 1966error:
1985 while (--i >= 0) 1967 while (--i >= 0)
1986 sysdev_remove_file(&per_cpu(mce_dev, cpu), mce_attrs[i]); 1968 sysdev_remove_file(sysdev, mce_sysdev_attrs[i]);
1987 1969
1988 sysdev_unregister(&per_cpu(mce_dev, cpu)); 1970 sysdev_unregister(sysdev);
1989 1971
1990 return err; 1972 return err;
1991} 1973}
1992 1974
1993static __cpuinit void mce_remove_device(unsigned int cpu) 1975static __cpuinit void mce_sysdev_remove(unsigned int cpu)
1994{ 1976{
1977 struct sys_device *sysdev = &per_cpu(mce_sysdev, cpu);
1995 int i; 1978 int i;
1996 1979
1997 if (!cpumask_test_cpu(cpu, mce_dev_initialized)) 1980 if (!cpumask_test_cpu(cpu, mce_sysdev_initialized))
1998 return; 1981 return;
1999 1982
2000 for (i = 0; mce_attrs[i]; i++) 1983 for (i = 0; mce_sysdev_attrs[i]; i++)
2001 sysdev_remove_file(&per_cpu(mce_dev, cpu), mce_attrs[i]); 1984 sysdev_remove_file(sysdev, mce_sysdev_attrs[i]);
2002 1985
2003 for (i = 0; i < banks; i++) 1986 for (i = 0; i < banks; i++)
2004 sysdev_remove_file(&per_cpu(mce_dev, cpu), &mce_banks[i].attr); 1987 sysdev_remove_file(sysdev, &mce_banks[i].attr);
2005 1988
2006 sysdev_unregister(&per_cpu(mce_dev, cpu)); 1989 sysdev_unregister(sysdev);
2007 cpumask_clear_cpu(cpu, mce_dev_initialized); 1990 cpumask_clear_cpu(cpu, mce_sysdev_initialized);
2008} 1991}
2009 1992
2010/* Make sure there are no machine checks on offlined CPUs. */ 1993/* Make sure there are no machine checks on offlined CPUs. */
@@ -2054,7 +2037,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
2054 switch (action) { 2037 switch (action) {
2055 case CPU_ONLINE: 2038 case CPU_ONLINE:
2056 case CPU_ONLINE_FROZEN: 2039 case CPU_ONLINE_FROZEN:
2057 mce_create_device(cpu); 2040 mce_sysdev_create(cpu);
2058 if (threshold_cpu_callback) 2041 if (threshold_cpu_callback)
2059 threshold_cpu_callback(action, cpu); 2042 threshold_cpu_callback(action, cpu);
2060 break; 2043 break;
@@ -2062,7 +2045,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
2062 case CPU_DEAD_FROZEN: 2045 case CPU_DEAD_FROZEN:
2063 if (threshold_cpu_callback) 2046 if (threshold_cpu_callback)
2064 threshold_cpu_callback(action, cpu); 2047 threshold_cpu_callback(action, cpu);
2065 mce_remove_device(cpu); 2048 mce_sysdev_remove(cpu);
2066 break; 2049 break;
2067 case CPU_DOWN_PREPARE: 2050 case CPU_DOWN_PREPARE:
2068 case CPU_DOWN_PREPARE_FROZEN: 2051 case CPU_DOWN_PREPARE_FROZEN:
@@ -2116,27 +2099,28 @@ static __init int mcheck_init_device(void)
2116 if (!mce_available(&boot_cpu_data)) 2099 if (!mce_available(&boot_cpu_data))
2117 return -EIO; 2100 return -EIO;
2118 2101
2119 zalloc_cpumask_var(&mce_dev_initialized, GFP_KERNEL); 2102 zalloc_cpumask_var(&mce_sysdev_initialized, GFP_KERNEL);
2120 2103
2121 mce_init_banks(); 2104 mce_init_banks();
2122 2105
2123 err = sysdev_class_register(&mce_sysclass); 2106 err = sysdev_class_register(&mce_sysdev_class);
2124 if (err) 2107 if (err)
2125 return err; 2108 return err;
2126 2109
2127 for_each_online_cpu(i) { 2110 for_each_online_cpu(i) {
2128 err = mce_create_device(i); 2111 err = mce_sysdev_create(i);
2129 if (err) 2112 if (err)
2130 return err; 2113 return err;
2131 } 2114 }
2132 2115
2133 register_syscore_ops(&mce_syscore_ops); 2116 register_syscore_ops(&mce_syscore_ops);
2134 register_hotcpu_notifier(&mce_cpu_notifier); 2117 register_hotcpu_notifier(&mce_cpu_notifier);
2135 misc_register(&mce_log_device); 2118
2119 /* register character device /dev/mcelog */
2120 misc_register(&mce_chrdev_device);
2136 2121
2137 return err; 2122 return err;
2138} 2123}
2139
2140device_initcall(mcheck_init_device); 2124device_initcall(mcheck_init_device);
2141 2125
2142/* 2126/*
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index bb0adad35143..f5474218cffe 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -548,7 +548,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
548 if (!b) 548 if (!b)
549 goto out; 549 goto out;
550 550
551 err = sysfs_create_link(&per_cpu(mce_dev, cpu).kobj, 551 err = sysfs_create_link(&per_cpu(mce_sysdev, cpu).kobj,
552 b->kobj, name); 552 b->kobj, name);
553 if (err) 553 if (err)
554 goto out; 554 goto out;
@@ -571,7 +571,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
571 goto out; 571 goto out;
572 } 572 }
573 573
574 b->kobj = kobject_create_and_add(name, &per_cpu(mce_dev, cpu).kobj); 574 b->kobj = kobject_create_and_add(name, &per_cpu(mce_sysdev, cpu).kobj);
575 if (!b->kobj) 575 if (!b->kobj)
576 goto out_free; 576 goto out_free;
577 577
@@ -591,7 +591,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
591 if (i == cpu) 591 if (i == cpu)
592 continue; 592 continue;
593 593
594 err = sysfs_create_link(&per_cpu(mce_dev, i).kobj, 594 err = sysfs_create_link(&per_cpu(mce_sysdev, i).kobj,
595 b->kobj, name); 595 b->kobj, name);
596 if (err) 596 if (err)
597 goto out; 597 goto out;
@@ -669,7 +669,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
669#ifdef CONFIG_SMP 669#ifdef CONFIG_SMP
670 /* sibling symlink */ 670 /* sibling symlink */
671 if (shared_bank[bank] && b->blocks->cpu != cpu) { 671 if (shared_bank[bank] && b->blocks->cpu != cpu) {
672 sysfs_remove_link(&per_cpu(mce_dev, cpu).kobj, name); 672 sysfs_remove_link(&per_cpu(mce_sysdev, cpu).kobj, name);
673 per_cpu(threshold_banks, cpu)[bank] = NULL; 673 per_cpu(threshold_banks, cpu)[bank] = NULL;
674 674
675 return; 675 return;
@@ -681,7 +681,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
681 if (i == cpu) 681 if (i == cpu)
682 continue; 682 continue;
683 683
684 sysfs_remove_link(&per_cpu(mce_dev, i).kobj, name); 684 sysfs_remove_link(&per_cpu(mce_sysdev, i).kobj, name);
685 per_cpu(threshold_banks, i)[bank] = NULL; 685 per_cpu(threshold_banks, i)[bank] = NULL;
686 } 686 }
687 687
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 929739a653d1..08119a37e53c 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -79,7 +79,6 @@ void set_mtrr_ops(const struct mtrr_ops *ops)
79static int have_wrcomb(void) 79static int have_wrcomb(void)
80{ 80{
81 struct pci_dev *dev; 81 struct pci_dev *dev;
82 u8 rev;
83 82
84 dev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, NULL); 83 dev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, NULL);
85 if (dev != NULL) { 84 if (dev != NULL) {
@@ -89,13 +88,11 @@ static int have_wrcomb(void)
89 * chipsets to be tagged 88 * chipsets to be tagged
90 */ 89 */
91 if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS && 90 if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS &&
92 dev->device == PCI_DEVICE_ID_SERVERWORKS_LE) { 91 dev->device == PCI_DEVICE_ID_SERVERWORKS_LE &&
93 pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev); 92 dev->revision <= 5) {
94 if (rev <= 5) { 93 pr_info("mtrr: Serverworks LE rev < 6 detected. Write-combining disabled.\n");
95 pr_info("mtrr: Serverworks LE rev < 6 detected. Write-combining disabled.\n"); 94 pci_dev_put(dev);
96 pci_dev_put(dev); 95 return 0;
97 return 0;
98 }
99 } 96 }
100 /* 97 /*
101 * Intel 450NX errata # 23. Non ascending cacheline evictions to 98 * Intel 450NX errata # 23. Non ascending cacheline evictions to
@@ -137,55 +134,43 @@ static void __init init_table(void)
137} 134}
138 135
139struct set_mtrr_data { 136struct set_mtrr_data {
140 atomic_t count;
141 atomic_t gate;
142 unsigned long smp_base; 137 unsigned long smp_base;
143 unsigned long smp_size; 138 unsigned long smp_size;
144 unsigned int smp_reg; 139 unsigned int smp_reg;
145 mtrr_type smp_type; 140 mtrr_type smp_type;
146}; 141};
147 142
148static DEFINE_PER_CPU(struct cpu_stop_work, mtrr_work);
149
150/** 143/**
151 * mtrr_work_handler - Synchronisation handler. Executed by "other" CPUs. 144 * mtrr_rendezvous_handler - Work done in the synchronization handler. Executed
145 * by all the CPUs.
152 * @info: pointer to mtrr configuration data 146 * @info: pointer to mtrr configuration data
153 * 147 *
154 * Returns nothing. 148 * Returns nothing.
155 */ 149 */
156static int mtrr_work_handler(void *info) 150static int mtrr_rendezvous_handler(void *info)
157{ 151{
158#ifdef CONFIG_SMP 152#ifdef CONFIG_SMP
159 struct set_mtrr_data *data = info; 153 struct set_mtrr_data *data = info;
160 unsigned long flags;
161
162 atomic_dec(&data->count);
163 while (!atomic_read(&data->gate))
164 cpu_relax();
165
166 local_irq_save(flags);
167
168 atomic_dec(&data->count);
169 while (atomic_read(&data->gate))
170 cpu_relax();
171 154
172 /* The master has cleared me to execute */ 155 /*
156 * We use this same function to initialize the mtrrs during boot,
157 * resume, runtime cpu online and on an explicit request to set a
158 * specific MTRR.
159 *
160 * During boot or suspend, the state of the boot cpu's mtrrs has been
161 * saved, and we want to replicate that across all the cpus that come
162 * online (either at the end of boot or resume or during a runtime cpu
163 * online). If we're doing that, @reg is set to something special and on
164 * all the cpu's we do mtrr_if->set_all() (On the logical cpu that
165 * started the boot/resume sequence, this might be a duplicate
166 * set_all()).
167 */
173 if (data->smp_reg != ~0U) { 168 if (data->smp_reg != ~0U) {
174 mtrr_if->set(data->smp_reg, data->smp_base, 169 mtrr_if->set(data->smp_reg, data->smp_base,
175 data->smp_size, data->smp_type); 170 data->smp_size, data->smp_type);
176 } else if (mtrr_aps_delayed_init) { 171 } else if (mtrr_aps_delayed_init || !cpu_online(smp_processor_id())) {
177 /*
178 * Initialize the MTRRs inaddition to the synchronisation.
179 */
180 mtrr_if->set_all(); 172 mtrr_if->set_all();
181 } 173 }
182
183 atomic_dec(&data->count);
184 while (!atomic_read(&data->gate))
185 cpu_relax();
186
187 atomic_dec(&data->count);
188 local_irq_restore(flags);
189#endif 174#endif
190 return 0; 175 return 0;
191} 176}
@@ -223,20 +208,11 @@ static inline int types_compatible(mtrr_type type1, mtrr_type type2)
223 * 14. Wait for buddies to catch up 208 * 14. Wait for buddies to catch up
224 * 15. Enable interrupts. 209 * 15. Enable interrupts.
225 * 210 *
226 * What does that mean for us? Well, first we set data.count to the number 211 * What does that mean for us? Well, stop_machine() will ensure that
227 * of CPUs. As each CPU announces that it started the rendezvous handler by 212 * the rendezvous handler is started on each CPU. And in lockstep they
228 * decrementing the count, We reset data.count and set the data.gate flag 213 * do the state transition of disabling interrupts, updating MTRR's
229 * allowing all the cpu's to proceed with the work. As each cpu disables 214 * (the CPU vendors may each do it differently, so we call mtrr_if->set()
230 * interrupts, it'll decrement data.count once. We wait until it hits 0 and 215 * callback and let them take care of it.) and enabling interrupts.
231 * proceed. We clear the data.gate flag and reset data.count. Meanwhile, they
232 * are waiting for that flag to be cleared. Once it's cleared, each
233 * CPU goes through the transition of updating MTRRs.
234 * The CPU vendors may each do it differently,
235 * so we call mtrr_if->set() callback and let them take care of it.
236 * When they're done, they again decrement data->count and wait for data.gate
237 * to be set.
238 * When we finish, we wait for data.count to hit 0 and toggle the data.gate flag
239 * Everyone then enables interrupts and we all continue on.
240 * 216 *
241 * Note that the mechanism is the same for UP systems, too; all the SMP stuff 217 * Note that the mechanism is the same for UP systems, too; all the SMP stuff
242 * becomes nops. 218 * becomes nops.
@@ -244,92 +220,26 @@ static inline int types_compatible(mtrr_type type1, mtrr_type type2)
244static void 220static void
245set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type) 221set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type)
246{ 222{
247 struct set_mtrr_data data; 223 struct set_mtrr_data data = { .smp_reg = reg,
248 unsigned long flags; 224 .smp_base = base,
249 int cpu; 225 .smp_size = size,
250 226 .smp_type = type
251 preempt_disable(); 227 };
252
253 data.smp_reg = reg;
254 data.smp_base = base;
255 data.smp_size = size;
256 data.smp_type = type;
257 atomic_set(&data.count, num_booting_cpus() - 1);
258
259 /* Make sure data.count is visible before unleashing other CPUs */
260 smp_wmb();
261 atomic_set(&data.gate, 0);
262
263 /* Start the ball rolling on other CPUs */
264 for_each_online_cpu(cpu) {
265 struct cpu_stop_work *work = &per_cpu(mtrr_work, cpu);
266
267 if (cpu == smp_processor_id())
268 continue;
269
270 stop_one_cpu_nowait(cpu, mtrr_work_handler, &data, work);
271 }
272
273
274 while (atomic_read(&data.count))
275 cpu_relax();
276
277 /* Ok, reset count and toggle gate */
278 atomic_set(&data.count, num_booting_cpus() - 1);
279 smp_wmb();
280 atomic_set(&data.gate, 1);
281
282 local_irq_save(flags);
283
284 while (atomic_read(&data.count))
285 cpu_relax();
286
287 /* Ok, reset count and toggle gate */
288 atomic_set(&data.count, num_booting_cpus() - 1);
289 smp_wmb();
290 atomic_set(&data.gate, 0);
291
292 /* Do our MTRR business */
293
294 /*
295 * HACK!
296 *
297 * We use this same function to initialize the mtrrs during boot,
298 * resume, runtime cpu online and on an explicit request to set a
299 * specific MTRR.
300 *
301 * During boot or suspend, the state of the boot cpu's mtrrs has been
302 * saved, and we want to replicate that across all the cpus that come
303 * online (either at the end of boot or resume or during a runtime cpu
304 * online). If we're doing that, @reg is set to something special and on
305 * this cpu we still do mtrr_if->set_all(). During boot/resume, this
306 * is unnecessary if at this point we are still on the cpu that started
307 * the boot/resume sequence. But there is no guarantee that we are still
308 * on the same cpu. So we do mtrr_if->set_all() on this cpu aswell to be
309 * sure that we are in sync with everyone else.
310 */
311 if (reg != ~0U)
312 mtrr_if->set(reg, base, size, type);
313 else
314 mtrr_if->set_all();
315 228
316 /* Wait for the others */ 229 stop_machine(mtrr_rendezvous_handler, &data, cpu_online_mask);
317 while (atomic_read(&data.count)) 230}
318 cpu_relax();
319
320 atomic_set(&data.count, num_booting_cpus() - 1);
321 smp_wmb();
322 atomic_set(&data.gate, 1);
323
324 /*
325 * Wait here for everyone to have seen the gate change
326 * So we're the last ones to touch 'data'
327 */
328 while (atomic_read(&data.count))
329 cpu_relax();
330 231
331 local_irq_restore(flags); 232static void set_mtrr_from_inactive_cpu(unsigned int reg, unsigned long base,
332 preempt_enable(); 233 unsigned long size, mtrr_type type)
234{
235 struct set_mtrr_data data = { .smp_reg = reg,
236 .smp_base = base,
237 .smp_size = size,
238 .smp_type = type
239 };
240
241 stop_machine_from_inactive_cpu(mtrr_rendezvous_handler, &data,
242 cpu_callout_mask);
333} 243}
334 244
335/** 245/**
@@ -783,7 +693,7 @@ void mtrr_ap_init(void)
783 * 2. cpu hotadd time. We let mtrr_add/del_page hold cpuhotplug 693 * 2. cpu hotadd time. We let mtrr_add/del_page hold cpuhotplug
784 * lock to prevent mtrr entry changes 694 * lock to prevent mtrr entry changes
785 */ 695 */
786 set_mtrr(~0U, 0, 0, 0); 696 set_mtrr_from_inactive_cpu(~0U, 0, 0, 0);
787} 697}
788 698
789/** 699/**
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 3a0338b4b179..4ee3abf20ed6 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -22,7 +22,6 @@
22#include <linux/sched.h> 22#include <linux/sched.h>
23#include <linux/uaccess.h> 23#include <linux/uaccess.h>
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/highmem.h>
26#include <linux/cpu.h> 25#include <linux/cpu.h>
27#include <linux/bitops.h> 26#include <linux/bitops.h>
28 27
@@ -45,38 +44,27 @@ do { \
45#endif 44#endif
46 45
47/* 46/*
48 * best effort, GUP based copy_from_user() that assumes IRQ or NMI context 47 * | NHM/WSM | SNB |
48 * register -------------------------------
49 * | HT | no HT | HT | no HT |
50 *-----------------------------------------
51 * offcore | core | core | cpu | core |
52 * lbr_sel | core | core | cpu | core |
53 * ld_lat | cpu | core | cpu | core |
54 *-----------------------------------------
55 *
56 * Given that there is a small number of shared regs,
57 * we can pre-allocate their slot in the per-cpu
58 * per-core reg tables.
49 */ 59 */
50static unsigned long 60enum extra_reg_type {
51copy_from_user_nmi(void *to, const void __user *from, unsigned long n) 61 EXTRA_REG_NONE = -1, /* not used */
52{
53 unsigned long offset, addr = (unsigned long)from;
54 unsigned long size, len = 0;
55 struct page *page;
56 void *map;
57 int ret;
58
59 do {
60 ret = __get_user_pages_fast(addr, 1, 0, &page);
61 if (!ret)
62 break;
63
64 offset = addr & (PAGE_SIZE - 1);
65 size = min(PAGE_SIZE - offset, n - len);
66
67 map = kmap_atomic(page);
68 memcpy(to, map+offset, size);
69 kunmap_atomic(map);
70 put_page(page);
71 62
72 len += size; 63 EXTRA_REG_RSP_0 = 0, /* offcore_response_0 */
73 to += size; 64 EXTRA_REG_RSP_1 = 1, /* offcore_response_1 */
74 addr += size;
75 65
76 } while (len < n); 66 EXTRA_REG_MAX /* number of entries needed */
77 67};
78 return len;
79}
80 68
81struct event_constraint { 69struct event_constraint {
82 union { 70 union {
@@ -132,11 +120,10 @@ struct cpu_hw_events {
132 struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES]; 120 struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES];
133 121
134 /* 122 /*
135 * Intel percore register state. 123 * manage shared (per-core, per-cpu) registers
136 * Coordinate shared resources between HT threads. 124 * used on Intel NHM/WSM/SNB
137 */ 125 */
138 int percore_used; /* Used by this CPU? */ 126 struct intel_shared_regs *shared_regs;
139 struct intel_percore *per_core;
140 127
141 /* 128 /*
142 * AMD specific bits 129 * AMD specific bits
@@ -187,26 +174,45 @@ struct cpu_hw_events {
187 for ((e) = (c); (e)->weight; (e)++) 174 for ((e) = (c); (e)->weight; (e)++)
188 175
189/* 176/*
177 * Per register state.
178 */
179struct er_account {
180 raw_spinlock_t lock; /* per-core: protect structure */
181 u64 config; /* extra MSR config */
182 u64 reg; /* extra MSR number */
183 atomic_t ref; /* reference count */
184};
185
186/*
190 * Extra registers for specific events. 187 * Extra registers for specific events.
188 *
191 * Some events need large masks and require external MSRs. 189 * Some events need large masks and require external MSRs.
192 * Define a mapping to these extra registers. 190 * Those extra MSRs end up being shared for all events on
191 * a PMU and sometimes between PMU of sibling HT threads.
192 * In either case, the kernel needs to handle conflicting
193 * accesses to those extra, shared, regs. The data structure
194 * to manage those registers is stored in cpu_hw_event.
193 */ 195 */
194struct extra_reg { 196struct extra_reg {
195 unsigned int event; 197 unsigned int event;
196 unsigned int msr; 198 unsigned int msr;
197 u64 config_mask; 199 u64 config_mask;
198 u64 valid_mask; 200 u64 valid_mask;
201 int idx; /* per_xxx->regs[] reg index */
199}; 202};
200 203
201#define EVENT_EXTRA_REG(e, ms, m, vm) { \ 204#define EVENT_EXTRA_REG(e, ms, m, vm, i) { \
202 .event = (e), \ 205 .event = (e), \
203 .msr = (ms), \ 206 .msr = (ms), \
204 .config_mask = (m), \ 207 .config_mask = (m), \
205 .valid_mask = (vm), \ 208 .valid_mask = (vm), \
209 .idx = EXTRA_REG_##i \
206 } 210 }
207#define INTEL_EVENT_EXTRA_REG(event, msr, vm) \ 211
208 EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm) 212#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx) \
209#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0) 213 EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)
214
215#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)
210 216
211union perf_capabilities { 217union perf_capabilities {
212 struct { 218 struct {
@@ -252,7 +258,6 @@ struct x86_pmu {
252 void (*put_event_constraints)(struct cpu_hw_events *cpuc, 258 void (*put_event_constraints)(struct cpu_hw_events *cpuc,
253 struct perf_event *event); 259 struct perf_event *event);
254 struct event_constraint *event_constraints; 260 struct event_constraint *event_constraints;
255 struct event_constraint *percore_constraints;
256 void (*quirks)(void); 261 void (*quirks)(void);
257 int perfctr_second_write; 262 int perfctr_second_write;
258 263
@@ -286,8 +291,12 @@ struct x86_pmu {
286 * Extra registers for events 291 * Extra registers for events
287 */ 292 */
288 struct extra_reg *extra_regs; 293 struct extra_reg *extra_regs;
294 unsigned int er_flags;
289}; 295};
290 296
297#define ERF_NO_HT_SHARING 1
298#define ERF_HAS_RSP_1 2
299
291static struct x86_pmu x86_pmu __read_mostly; 300static struct x86_pmu x86_pmu __read_mostly;
292 301
293static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { 302static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
@@ -393,10 +402,10 @@ static inline unsigned int x86_pmu_event_addr(int index)
393 */ 402 */
394static int x86_pmu_extra_regs(u64 config, struct perf_event *event) 403static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
395{ 404{
405 struct hw_perf_event_extra *reg;
396 struct extra_reg *er; 406 struct extra_reg *er;
397 407
398 event->hw.extra_reg = 0; 408 reg = &event->hw.extra_reg;
399 event->hw.extra_config = 0;
400 409
401 if (!x86_pmu.extra_regs) 410 if (!x86_pmu.extra_regs)
402 return 0; 411 return 0;
@@ -406,8 +415,10 @@ static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
406 continue; 415 continue;
407 if (event->attr.config1 & ~er->valid_mask) 416 if (event->attr.config1 & ~er->valid_mask)
408 return -EINVAL; 417 return -EINVAL;
409 event->hw.extra_reg = er->msr; 418
410 event->hw.extra_config = event->attr.config1; 419 reg->idx = er->idx;
420 reg->config = event->attr.config1;
421 reg->reg = er->msr;
411 break; 422 break;
412 } 423 }
413 return 0; 424 return 0;
@@ -706,6 +717,9 @@ static int __x86_pmu_event_init(struct perf_event *event)
706 event->hw.last_cpu = -1; 717 event->hw.last_cpu = -1;
707 event->hw.last_tag = ~0ULL; 718 event->hw.last_tag = ~0ULL;
708 719
720 /* mark unused */
721 event->hw.extra_reg.idx = EXTRA_REG_NONE;
722
709 return x86_pmu.hw_config(event); 723 return x86_pmu.hw_config(event);
710} 724}
711 725
@@ -747,8 +761,8 @@ static void x86_pmu_disable(struct pmu *pmu)
747static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, 761static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
748 u64 enable_mask) 762 u64 enable_mask)
749{ 763{
750 if (hwc->extra_reg) 764 if (hwc->extra_reg.reg)
751 wrmsrl(hwc->extra_reg, hwc->extra_config); 765 wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
752 wrmsrl(hwc->config_base, hwc->config | enable_mask); 766 wrmsrl(hwc->config_base, hwc->config | enable_mask);
753} 767}
754 768
@@ -1332,7 +1346,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
1332 if (!x86_perf_event_set_period(event)) 1346 if (!x86_perf_event_set_period(event))
1333 continue; 1347 continue;
1334 1348
1335 if (perf_event_overflow(event, 1, &data, regs)) 1349 if (perf_event_overflow(event, &data, regs))
1336 x86_pmu_stop(event, 0); 1350 x86_pmu_stop(event, 0);
1337 } 1351 }
1338 1352
@@ -1637,6 +1651,40 @@ static int x86_pmu_commit_txn(struct pmu *pmu)
1637 perf_pmu_enable(pmu); 1651 perf_pmu_enable(pmu);
1638 return 0; 1652 return 0;
1639} 1653}
1654/*
1655 * a fake_cpuc is used to validate event groups. Due to
1656 * the extra reg logic, we need to also allocate a fake
1657 * per_core and per_cpu structure. Otherwise, group events
1658 * using extra reg may conflict without the kernel being
1659 * able to catch this when the last event gets added to
1660 * the group.
1661 */
1662static void free_fake_cpuc(struct cpu_hw_events *cpuc)
1663{
1664 kfree(cpuc->shared_regs);
1665 kfree(cpuc);
1666}
1667
1668static struct cpu_hw_events *allocate_fake_cpuc(void)
1669{
1670 struct cpu_hw_events *cpuc;
1671 int cpu = raw_smp_processor_id();
1672
1673 cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL);
1674 if (!cpuc)
1675 return ERR_PTR(-ENOMEM);
1676
1677 /* only needed, if we have extra_regs */
1678 if (x86_pmu.extra_regs) {
1679 cpuc->shared_regs = allocate_shared_regs(cpu);
1680 if (!cpuc->shared_regs)
1681 goto error;
1682 }
1683 return cpuc;
1684error:
1685 free_fake_cpuc(cpuc);
1686 return ERR_PTR(-ENOMEM);
1687}
1640 1688
1641/* 1689/*
1642 * validate that we can schedule this event 1690 * validate that we can schedule this event
@@ -1647,9 +1695,9 @@ static int validate_event(struct perf_event *event)
1647 struct event_constraint *c; 1695 struct event_constraint *c;
1648 int ret = 0; 1696 int ret = 0;
1649 1697
1650 fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO); 1698 fake_cpuc = allocate_fake_cpuc();
1651 if (!fake_cpuc) 1699 if (IS_ERR(fake_cpuc))
1652 return -ENOMEM; 1700 return PTR_ERR(fake_cpuc);
1653 1701
1654 c = x86_pmu.get_event_constraints(fake_cpuc, event); 1702 c = x86_pmu.get_event_constraints(fake_cpuc, event);
1655 1703
@@ -1659,7 +1707,7 @@ static int validate_event(struct perf_event *event)
1659 if (x86_pmu.put_event_constraints) 1707 if (x86_pmu.put_event_constraints)
1660 x86_pmu.put_event_constraints(fake_cpuc, event); 1708 x86_pmu.put_event_constraints(fake_cpuc, event);
1661 1709
1662 kfree(fake_cpuc); 1710 free_fake_cpuc(fake_cpuc);
1663 1711
1664 return ret; 1712 return ret;
1665} 1713}
@@ -1679,36 +1727,32 @@ static int validate_group(struct perf_event *event)
1679{ 1727{
1680 struct perf_event *leader = event->group_leader; 1728 struct perf_event *leader = event->group_leader;
1681 struct cpu_hw_events *fake_cpuc; 1729 struct cpu_hw_events *fake_cpuc;
1682 int ret, n; 1730 int ret = -ENOSPC, n;
1683
1684 ret = -ENOMEM;
1685 fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
1686 if (!fake_cpuc)
1687 goto out;
1688 1731
1732 fake_cpuc = allocate_fake_cpuc();
1733 if (IS_ERR(fake_cpuc))
1734 return PTR_ERR(fake_cpuc);
1689 /* 1735 /*
1690 * the event is not yet connected with its 1736 * the event is not yet connected with its
1691 * siblings therefore we must first collect 1737 * siblings therefore we must first collect
1692 * existing siblings, then add the new event 1738 * existing siblings, then add the new event
1693 * before we can simulate the scheduling 1739 * before we can simulate the scheduling
1694 */ 1740 */
1695 ret = -ENOSPC;
1696 n = collect_events(fake_cpuc, leader, true); 1741 n = collect_events(fake_cpuc, leader, true);
1697 if (n < 0) 1742 if (n < 0)
1698 goto out_free; 1743 goto out;
1699 1744
1700 fake_cpuc->n_events = n; 1745 fake_cpuc->n_events = n;
1701 n = collect_events(fake_cpuc, event, false); 1746 n = collect_events(fake_cpuc, event, false);
1702 if (n < 0) 1747 if (n < 0)
1703 goto out_free; 1748 goto out;
1704 1749
1705 fake_cpuc->n_events = n; 1750 fake_cpuc->n_events = n;
1706 1751
1707 ret = x86_pmu.schedule_events(fake_cpuc, n, NULL); 1752 ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);
1708 1753
1709out_free:
1710 kfree(fake_cpuc);
1711out: 1754out:
1755 free_fake_cpuc(fake_cpuc);
1712 return ret; 1756 return ret;
1713} 1757}
1714 1758
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index fe29c1d2219e..941caa2e449b 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -89,6 +89,20 @@ static __initconst const u64 amd_hw_cache_event_ids
89 [ C(RESULT_MISS) ] = -1, 89 [ C(RESULT_MISS) ] = -1,
90 }, 90 },
91 }, 91 },
92 [ C(NODE) ] = {
93 [ C(OP_READ) ] = {
94 [ C(RESULT_ACCESS) ] = 0xb8e9, /* CPU Request to Memory, l+r */
95 [ C(RESULT_MISS) ] = 0x98e9, /* CPU Request to Memory, r */
96 },
97 [ C(OP_WRITE) ] = {
98 [ C(RESULT_ACCESS) ] = -1,
99 [ C(RESULT_MISS) ] = -1,
100 },
101 [ C(OP_PREFETCH) ] = {
102 [ C(RESULT_ACCESS) ] = -1,
103 [ C(RESULT_MISS) ] = -1,
104 },
105 },
92}; 106};
93 107
94/* 108/*
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 41178c826c48..45fbb8f7f549 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1,25 +1,15 @@
1#ifdef CONFIG_CPU_SUP_INTEL 1#ifdef CONFIG_CPU_SUP_INTEL
2 2
3#define MAX_EXTRA_REGS 2
4
5/*
6 * Per register state.
7 */
8struct er_account {
9 int ref; /* reference count */
10 unsigned int extra_reg; /* extra MSR number */
11 u64 extra_config; /* extra MSR config */
12};
13
14/* 3/*
15 * Per core state 4 * Per core/cpu state
16 * This used to coordinate shared registers for HT threads. 5 *
6 * Used to coordinate shared registers between HT threads or
7 * among events on a single PMU.
17 */ 8 */
18struct intel_percore { 9struct intel_shared_regs {
19 raw_spinlock_t lock; /* protect structure */ 10 struct er_account regs[EXTRA_REG_MAX];
20 struct er_account regs[MAX_EXTRA_REGS]; 11 int refcnt; /* per-core: #HT threads */
21 int refcnt; /* number of threads */ 12 unsigned core_id; /* per-core: core id */
22 unsigned core_id;
23}; 13};
24 14
25/* 15/*
@@ -88,16 +78,10 @@ static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
88 78
89static struct extra_reg intel_nehalem_extra_regs[] __read_mostly = 79static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
90{ 80{
91 INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff), 81 INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
92 EVENT_EXTRA_END 82 EVENT_EXTRA_END
93}; 83};
94 84
95static struct event_constraint intel_nehalem_percore_constraints[] __read_mostly =
96{
97 INTEL_EVENT_CONSTRAINT(0xb7, 0),
98 EVENT_CONSTRAINT_END
99};
100
101static struct event_constraint intel_westmere_event_constraints[] __read_mostly = 85static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
102{ 86{
103 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ 87 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
@@ -116,8 +100,6 @@ static struct event_constraint intel_snb_event_constraints[] __read_mostly =
116 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ 100 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
117 /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */ 101 /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
118 INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */ 102 INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
119 INTEL_EVENT_CONSTRAINT(0xb7, 0x1), /* OFF_CORE_RESPONSE_0 */
120 INTEL_EVENT_CONSTRAINT(0xbb, 0x8), /* OFF_CORE_RESPONSE_1 */
121 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */ 103 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
122 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */ 104 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
123 EVENT_CONSTRAINT_END 105 EVENT_CONSTRAINT_END
@@ -125,15 +107,13 @@ static struct event_constraint intel_snb_event_constraints[] __read_mostly =
125 107
126static struct extra_reg intel_westmere_extra_regs[] __read_mostly = 108static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
127{ 109{
128 INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff), 110 INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
129 INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff), 111 INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
130 EVENT_EXTRA_END 112 EVENT_EXTRA_END
131}; 113};
132 114
133static struct event_constraint intel_westmere_percore_constraints[] __read_mostly = 115static struct event_constraint intel_v1_event_constraints[] __read_mostly =
134{ 116{
135 INTEL_EVENT_CONSTRAINT(0xb7, 0),
136 INTEL_EVENT_CONSTRAINT(0xbb, 0),
137 EVENT_CONSTRAINT_END 117 EVENT_CONSTRAINT_END
138}; 118};
139 119
@@ -145,6 +125,12 @@ static struct event_constraint intel_gen_event_constraints[] __read_mostly =
145 EVENT_CONSTRAINT_END 125 EVENT_CONSTRAINT_END
146}; 126};
147 127
128static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
129 INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
130 INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
131 EVENT_EXTRA_END
132};
133
148static u64 intel_pmu_event_map(int hw_event) 134static u64 intel_pmu_event_map(int hw_event)
149{ 135{
150 return intel_perfmon_event_map[hw_event]; 136 return intel_perfmon_event_map[hw_event];
@@ -245,6 +231,21 @@ static __initconst const u64 snb_hw_cache_event_ids
245 [ C(RESULT_MISS) ] = -1, 231 [ C(RESULT_MISS) ] = -1,
246 }, 232 },
247 }, 233 },
234 [ C(NODE) ] = {
235 [ C(OP_READ) ] = {
236 [ C(RESULT_ACCESS) ] = -1,
237 [ C(RESULT_MISS) ] = -1,
238 },
239 [ C(OP_WRITE) ] = {
240 [ C(RESULT_ACCESS) ] = -1,
241 [ C(RESULT_MISS) ] = -1,
242 },
243 [ C(OP_PREFETCH) ] = {
244 [ C(RESULT_ACCESS) ] = -1,
245 [ C(RESULT_MISS) ] = -1,
246 },
247 },
248
248}; 249};
249 250
250static __initconst const u64 westmere_hw_cache_event_ids 251static __initconst const u64 westmere_hw_cache_event_ids
@@ -346,6 +347,20 @@ static __initconst const u64 westmere_hw_cache_event_ids
346 [ C(RESULT_MISS) ] = -1, 347 [ C(RESULT_MISS) ] = -1,
347 }, 348 },
348 }, 349 },
350 [ C(NODE) ] = {
351 [ C(OP_READ) ] = {
352 [ C(RESULT_ACCESS) ] = 0x01b7,
353 [ C(RESULT_MISS) ] = 0x01b7,
354 },
355 [ C(OP_WRITE) ] = {
356 [ C(RESULT_ACCESS) ] = 0x01b7,
357 [ C(RESULT_MISS) ] = 0x01b7,
358 },
359 [ C(OP_PREFETCH) ] = {
360 [ C(RESULT_ACCESS) ] = 0x01b7,
361 [ C(RESULT_MISS) ] = 0x01b7,
362 },
363 },
349}; 364};
350 365
351/* 366/*
@@ -398,7 +413,21 @@ static __initconst const u64 nehalem_hw_cache_extra_regs
398 [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS, 413 [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
399 [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_L3_MISS, 414 [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
400 }, 415 },
401 } 416 },
417 [ C(NODE) ] = {
418 [ C(OP_READ) ] = {
419 [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_ALL_DRAM,
420 [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_REMOTE_DRAM,
421 },
422 [ C(OP_WRITE) ] = {
423 [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_ALL_DRAM,
424 [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_REMOTE_DRAM,
425 },
426 [ C(OP_PREFETCH) ] = {
427 [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_ALL_DRAM,
428 [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_REMOTE_DRAM,
429 },
430 },
402}; 431};
403 432
404static __initconst const u64 nehalem_hw_cache_event_ids 433static __initconst const u64 nehalem_hw_cache_event_ids
@@ -500,6 +529,20 @@ static __initconst const u64 nehalem_hw_cache_event_ids
500 [ C(RESULT_MISS) ] = -1, 529 [ C(RESULT_MISS) ] = -1,
501 }, 530 },
502 }, 531 },
532 [ C(NODE) ] = {
533 [ C(OP_READ) ] = {
534 [ C(RESULT_ACCESS) ] = 0x01b7,
535 [ C(RESULT_MISS) ] = 0x01b7,
536 },
537 [ C(OP_WRITE) ] = {
538 [ C(RESULT_ACCESS) ] = 0x01b7,
539 [ C(RESULT_MISS) ] = 0x01b7,
540 },
541 [ C(OP_PREFETCH) ] = {
542 [ C(RESULT_ACCESS) ] = 0x01b7,
543 [ C(RESULT_MISS) ] = 0x01b7,
544 },
545 },
503}; 546};
504 547
505static __initconst const u64 core2_hw_cache_event_ids 548static __initconst const u64 core2_hw_cache_event_ids
@@ -1003,7 +1046,7 @@ again:
1003 1046
1004 data.period = event->hw.last_period; 1047 data.period = event->hw.last_period;
1005 1048
1006 if (perf_event_overflow(event, 1, &data, regs)) 1049 if (perf_event_overflow(event, &data, regs))
1007 x86_pmu_stop(event, 0); 1050 x86_pmu_stop(event, 0);
1008 } 1051 }
1009 1052
@@ -1037,65 +1080,121 @@ intel_bts_constraints(struct perf_event *event)
1037 return NULL; 1080 return NULL;
1038} 1081}
1039 1082
1083static bool intel_try_alt_er(struct perf_event *event, int orig_idx)
1084{
1085 if (!(x86_pmu.er_flags & ERF_HAS_RSP_1))
1086 return false;
1087
1088 if (event->hw.extra_reg.idx == EXTRA_REG_RSP_0) {
1089 event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
1090 event->hw.config |= 0x01bb;
1091 event->hw.extra_reg.idx = EXTRA_REG_RSP_1;
1092 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
1093 } else if (event->hw.extra_reg.idx == EXTRA_REG_RSP_1) {
1094 event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
1095 event->hw.config |= 0x01b7;
1096 event->hw.extra_reg.idx = EXTRA_REG_RSP_0;
1097 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
1098 }
1099
1100 if (event->hw.extra_reg.idx == orig_idx)
1101 return false;
1102
1103 return true;
1104}
1105
1106/*
1107 * manage allocation of shared extra msr for certain events
1108 *
1109 * sharing can be:
1110 * per-cpu: to be shared between the various events on a single PMU
1111 * per-core: per-cpu + shared by HT threads
1112 */
1040static struct event_constraint * 1113static struct event_constraint *
1041intel_percore_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) 1114__intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
1115 struct perf_event *event)
1042{ 1116{
1043 struct hw_perf_event *hwc = &event->hw; 1117 struct event_constraint *c = &emptyconstraint;
1044 unsigned int e = hwc->config & ARCH_PERFMON_EVENTSEL_EVENT; 1118 struct hw_perf_event_extra *reg = &event->hw.extra_reg;
1045 struct event_constraint *c;
1046 struct intel_percore *pc;
1047 struct er_account *era; 1119 struct er_account *era;
1048 int i; 1120 unsigned long flags;
1049 int free_slot; 1121 int orig_idx = reg->idx;
1050 int found;
1051 1122
1052 if (!x86_pmu.percore_constraints || hwc->extra_alloc) 1123 /* already allocated shared msr */
1053 return NULL; 1124 if (reg->alloc)
1125 return &unconstrained;
1054 1126
1055 for (c = x86_pmu.percore_constraints; c->cmask; c++) { 1127again:
1056 if (e != c->code) 1128 era = &cpuc->shared_regs->regs[reg->idx];
1057 continue; 1129 /*
1130 * we use spin_lock_irqsave() to avoid lockdep issues when
1131 * passing a fake cpuc
1132 */
1133 raw_spin_lock_irqsave(&era->lock, flags);
1134
1135 if (!atomic_read(&era->ref) || era->config == reg->config) {
1136
1137 /* lock in msr value */
1138 era->config = reg->config;
1139 era->reg = reg->reg;
1140
1141 /* one more user */
1142 atomic_inc(&era->ref);
1143
1144 /* no need to reallocate during incremental event scheduling */
1145 reg->alloc = 1;
1058 1146
1059 /* 1147 /*
1060 * Allocate resource per core. 1148 * All events using extra_reg are unconstrained.
1149 * Avoids calling x86_get_event_constraints()
1150 *
1151 * Must revisit if extra_reg controlling events
1152 * ever have constraints. Worst case we go through
1153 * the regular event constraint table.
1061 */ 1154 */
1062 pc = cpuc->per_core; 1155 c = &unconstrained;
1063 if (!pc) 1156 } else if (intel_try_alt_er(event, orig_idx)) {
1064 break; 1157 raw_spin_unlock(&era->lock);
1065 c = &emptyconstraint; 1158 goto again;
1066 raw_spin_lock(&pc->lock);
1067 free_slot = -1;
1068 found = 0;
1069 for (i = 0; i < MAX_EXTRA_REGS; i++) {
1070 era = &pc->regs[i];
1071 if (era->ref > 0 && hwc->extra_reg == era->extra_reg) {
1072 /* Allow sharing same config */
1073 if (hwc->extra_config == era->extra_config) {
1074 era->ref++;
1075 cpuc->percore_used = 1;
1076 hwc->extra_alloc = 1;
1077 c = NULL;
1078 }
1079 /* else conflict */
1080 found = 1;
1081 break;
1082 } else if (era->ref == 0 && free_slot == -1)
1083 free_slot = i;
1084 }
1085 if (!found && free_slot != -1) {
1086 era = &pc->regs[free_slot];
1087 era->ref = 1;
1088 era->extra_reg = hwc->extra_reg;
1089 era->extra_config = hwc->extra_config;
1090 cpuc->percore_used = 1;
1091 hwc->extra_alloc = 1;
1092 c = NULL;
1093 }
1094 raw_spin_unlock(&pc->lock);
1095 return c;
1096 } 1159 }
1160 raw_spin_unlock_irqrestore(&era->lock, flags);
1097 1161
1098 return NULL; 1162 return c;
1163}
1164
1165static void
1166__intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
1167 struct hw_perf_event_extra *reg)
1168{
1169 struct er_account *era;
1170
1171 /*
1172 * only put constraint if extra reg was actually
1173 * allocated. Also takes care of event which do
1174 * not use an extra shared reg
1175 */
1176 if (!reg->alloc)
1177 return;
1178
1179 era = &cpuc->shared_regs->regs[reg->idx];
1180
1181 /* one fewer user */
1182 atomic_dec(&era->ref);
1183
1184 /* allocate again next time */
1185 reg->alloc = 0;
1186}
1187
1188static struct event_constraint *
1189intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
1190 struct perf_event *event)
1191{
1192 struct event_constraint *c = NULL;
1193
1194 if (event->hw.extra_reg.idx != EXTRA_REG_NONE)
1195 c = __intel_shared_reg_get_constraints(cpuc, event);
1196
1197 return c;
1099} 1198}
1100 1199
1101static struct event_constraint * 1200static struct event_constraint *
@@ -1111,49 +1210,28 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event
1111 if (c) 1210 if (c)
1112 return c; 1211 return c;
1113 1212
1114 c = intel_percore_constraints(cpuc, event); 1213 c = intel_shared_regs_constraints(cpuc, event);
1115 if (c) 1214 if (c)
1116 return c; 1215 return c;
1117 1216
1118 return x86_get_event_constraints(cpuc, event); 1217 return x86_get_event_constraints(cpuc, event);
1119} 1218}
1120 1219
1121static void intel_put_event_constraints(struct cpu_hw_events *cpuc, 1220static void
1221intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
1122 struct perf_event *event) 1222 struct perf_event *event)
1123{ 1223{
1124 struct extra_reg *er; 1224 struct hw_perf_event_extra *reg;
1125 struct intel_percore *pc;
1126 struct er_account *era;
1127 struct hw_perf_event *hwc = &event->hw;
1128 int i, allref;
1129 1225
1130 if (!cpuc->percore_used) 1226 reg = &event->hw.extra_reg;
1131 return; 1227 if (reg->idx != EXTRA_REG_NONE)
1132 1228 __intel_shared_reg_put_constraints(cpuc, reg);
1133 for (er = x86_pmu.extra_regs; er->msr; er++) { 1229}
1134 if (er->event != (hwc->config & er->config_mask))
1135 continue;
1136 1230
1137 pc = cpuc->per_core; 1231static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
1138 raw_spin_lock(&pc->lock); 1232 struct perf_event *event)
1139 for (i = 0; i < MAX_EXTRA_REGS; i++) { 1233{
1140 era = &pc->regs[i]; 1234 intel_put_shared_regs_event_constraints(cpuc, event);
1141 if (era->ref > 0 &&
1142 era->extra_config == hwc->extra_config &&
1143 era->extra_reg == er->msr) {
1144 era->ref--;
1145 hwc->extra_alloc = 0;
1146 break;
1147 }
1148 }
1149 allref = 0;
1150 for (i = 0; i < MAX_EXTRA_REGS; i++)
1151 allref += pc->regs[i].ref;
1152 if (allref == 0)
1153 cpuc->percore_used = 0;
1154 raw_spin_unlock(&pc->lock);
1155 break;
1156 }
1157} 1235}
1158 1236
1159static int intel_pmu_hw_config(struct perf_event *event) 1237static int intel_pmu_hw_config(struct perf_event *event)
@@ -1231,20 +1309,36 @@ static __initconst const struct x86_pmu core_pmu = {
1231 .event_constraints = intel_core_event_constraints, 1309 .event_constraints = intel_core_event_constraints,
1232}; 1310};
1233 1311
1312static struct intel_shared_regs *allocate_shared_regs(int cpu)
1313{
1314 struct intel_shared_regs *regs;
1315 int i;
1316
1317 regs = kzalloc_node(sizeof(struct intel_shared_regs),
1318 GFP_KERNEL, cpu_to_node(cpu));
1319 if (regs) {
1320 /*
1321 * initialize the locks to keep lockdep happy
1322 */
1323 for (i = 0; i < EXTRA_REG_MAX; i++)
1324 raw_spin_lock_init(&regs->regs[i].lock);
1325
1326 regs->core_id = -1;
1327 }
1328 return regs;
1329}
1330
1234static int intel_pmu_cpu_prepare(int cpu) 1331static int intel_pmu_cpu_prepare(int cpu)
1235{ 1332{
1236 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); 1333 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
1237 1334
1238 if (!cpu_has_ht_siblings()) 1335 if (!x86_pmu.extra_regs)
1239 return NOTIFY_OK; 1336 return NOTIFY_OK;
1240 1337
1241 cpuc->per_core = kzalloc_node(sizeof(struct intel_percore), 1338 cpuc->shared_regs = allocate_shared_regs(cpu);
1242 GFP_KERNEL, cpu_to_node(cpu)); 1339 if (!cpuc->shared_regs)
1243 if (!cpuc->per_core)
1244 return NOTIFY_BAD; 1340 return NOTIFY_BAD;
1245 1341
1246 raw_spin_lock_init(&cpuc->per_core->lock);
1247 cpuc->per_core->core_id = -1;
1248 return NOTIFY_OK; 1342 return NOTIFY_OK;
1249} 1343}
1250 1344
@@ -1260,32 +1354,34 @@ static void intel_pmu_cpu_starting(int cpu)
1260 */ 1354 */
1261 intel_pmu_lbr_reset(); 1355 intel_pmu_lbr_reset();
1262 1356
1263 if (!cpu_has_ht_siblings()) 1357 if (!cpuc->shared_regs || (x86_pmu.er_flags & ERF_NO_HT_SHARING))
1264 return; 1358 return;
1265 1359
1266 for_each_cpu(i, topology_thread_cpumask(cpu)) { 1360 for_each_cpu(i, topology_thread_cpumask(cpu)) {
1267 struct intel_percore *pc = per_cpu(cpu_hw_events, i).per_core; 1361 struct intel_shared_regs *pc;
1268 1362
1363 pc = per_cpu(cpu_hw_events, i).shared_regs;
1269 if (pc && pc->core_id == core_id) { 1364 if (pc && pc->core_id == core_id) {
1270 kfree(cpuc->per_core); 1365 kfree(cpuc->shared_regs);
1271 cpuc->per_core = pc; 1366 cpuc->shared_regs = pc;
1272 break; 1367 break;
1273 } 1368 }
1274 } 1369 }
1275 1370
1276 cpuc->per_core->core_id = core_id; 1371 cpuc->shared_regs->core_id = core_id;
1277 cpuc->per_core->refcnt++; 1372 cpuc->shared_regs->refcnt++;
1278} 1373}
1279 1374
1280static void intel_pmu_cpu_dying(int cpu) 1375static void intel_pmu_cpu_dying(int cpu)
1281{ 1376{
1282 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); 1377 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
1283 struct intel_percore *pc = cpuc->per_core; 1378 struct intel_shared_regs *pc;
1284 1379
1380 pc = cpuc->shared_regs;
1285 if (pc) { 1381 if (pc) {
1286 if (pc->core_id == -1 || --pc->refcnt == 0) 1382 if (pc->core_id == -1 || --pc->refcnt == 0)
1287 kfree(pc); 1383 kfree(pc);
1288 cpuc->per_core = NULL; 1384 cpuc->shared_regs = NULL;
1289 } 1385 }
1290 1386
1291 fini_debug_store_on_cpu(cpu); 1387 fini_debug_store_on_cpu(cpu);
@@ -1436,7 +1532,6 @@ static __init int intel_pmu_init(void)
1436 1532
1437 x86_pmu.event_constraints = intel_nehalem_event_constraints; 1533 x86_pmu.event_constraints = intel_nehalem_event_constraints;
1438 x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints; 1534 x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
1439 x86_pmu.percore_constraints = intel_nehalem_percore_constraints;
1440 x86_pmu.enable_all = intel_pmu_nhm_enable_all; 1535 x86_pmu.enable_all = intel_pmu_nhm_enable_all;
1441 x86_pmu.extra_regs = intel_nehalem_extra_regs; 1536 x86_pmu.extra_regs = intel_nehalem_extra_regs;
1442 1537
@@ -1481,10 +1576,10 @@ static __init int intel_pmu_init(void)
1481 intel_pmu_lbr_init_nhm(); 1576 intel_pmu_lbr_init_nhm();
1482 1577
1483 x86_pmu.event_constraints = intel_westmere_event_constraints; 1578 x86_pmu.event_constraints = intel_westmere_event_constraints;
1484 x86_pmu.percore_constraints = intel_westmere_percore_constraints;
1485 x86_pmu.enable_all = intel_pmu_nhm_enable_all; 1579 x86_pmu.enable_all = intel_pmu_nhm_enable_all;
1486 x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints; 1580 x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
1487 x86_pmu.extra_regs = intel_westmere_extra_regs; 1581 x86_pmu.extra_regs = intel_westmere_extra_regs;
1582 x86_pmu.er_flags |= ERF_HAS_RSP_1;
1488 1583
1489 /* UOPS_ISSUED.STALLED_CYCLES */ 1584 /* UOPS_ISSUED.STALLED_CYCLES */
1490 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e; 1585 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e;
@@ -1502,6 +1597,10 @@ static __init int intel_pmu_init(void)
1502 1597
1503 x86_pmu.event_constraints = intel_snb_event_constraints; 1598 x86_pmu.event_constraints = intel_snb_event_constraints;
1504 x86_pmu.pebs_constraints = intel_snb_pebs_events; 1599 x86_pmu.pebs_constraints = intel_snb_pebs_events;
1600 x86_pmu.extra_regs = intel_snb_extra_regs;
1601 /* all extra regs are per-cpu when HT is on */
1602 x86_pmu.er_flags |= ERF_HAS_RSP_1;
1603 x86_pmu.er_flags |= ERF_NO_HT_SHARING;
1505 1604
1506 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */ 1605 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
1507 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e; 1606 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e;
@@ -1512,11 +1611,19 @@ static __init int intel_pmu_init(void)
1512 break; 1611 break;
1513 1612
1514 default: 1613 default:
1515 /* 1614 switch (x86_pmu.version) {
1516 * default constraints for v2 and up 1615 case 1:
1517 */ 1616 x86_pmu.event_constraints = intel_v1_event_constraints;
1518 x86_pmu.event_constraints = intel_gen_event_constraints; 1617 pr_cont("generic architected perfmon v1, ");
1519 pr_cont("generic architected perfmon, "); 1618 break;
1619 default:
1620 /*
1621 * default constraints for v2 and up
1622 */
1623 x86_pmu.event_constraints = intel_gen_event_constraints;
1624 pr_cont("generic architected perfmon, ");
1625 break;
1626 }
1520 } 1627 }
1521 return 0; 1628 return 0;
1522} 1629}
@@ -1528,4 +1635,8 @@ static int intel_pmu_init(void)
1528 return 0; 1635 return 0;
1529} 1636}
1530 1637
1638static struct intel_shared_regs *allocate_shared_regs(int cpu)
1639{
1640 return NULL;
1641}
1531#endif /* CONFIG_CPU_SUP_INTEL */ 1642#endif /* CONFIG_CPU_SUP_INTEL */
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index bab491b8ee25..1b1ef3addcfd 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -340,7 +340,7 @@ static int intel_pmu_drain_bts_buffer(void)
340 */ 340 */
341 perf_prepare_sample(&header, &data, event, &regs); 341 perf_prepare_sample(&header, &data, event, &regs);
342 342
343 if (perf_output_begin(&handle, event, header.size * (top - at), 1, 1)) 343 if (perf_output_begin(&handle, event, header.size * (top - at)))
344 return 1; 344 return 1;
345 345
346 for (; at < top; at++) { 346 for (; at < top; at++) {
@@ -616,7 +616,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
616 else 616 else
617 regs.flags &= ~PERF_EFLAGS_EXACT; 617 regs.flags &= ~PERF_EFLAGS_EXACT;
618 618
619 if (perf_event_overflow(event, 1, &data, &regs)) 619 if (perf_event_overflow(event, &data, &regs))
620 x86_pmu_stop(event, 0); 620 x86_pmu_stop(event, 0);
621} 621}
622 622
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index ead584fb6a7d..7809d2bcb209 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -554,13 +554,102 @@ static __initconst const u64 p4_hw_cache_event_ids
554 [ C(RESULT_MISS) ] = -1, 554 [ C(RESULT_MISS) ] = -1,
555 }, 555 },
556 }, 556 },
557 [ C(NODE) ] = {
558 [ C(OP_READ) ] = {
559 [ C(RESULT_ACCESS) ] = -1,
560 [ C(RESULT_MISS) ] = -1,
561 },
562 [ C(OP_WRITE) ] = {
563 [ C(RESULT_ACCESS) ] = -1,
564 [ C(RESULT_MISS) ] = -1,
565 },
566 [ C(OP_PREFETCH) ] = {
567 [ C(RESULT_ACCESS) ] = -1,
568 [ C(RESULT_MISS) ] = -1,
569 },
570 },
557}; 571};
558 572
573/*
574 * Because of Netburst being quite restricted in how many
575 * identical events may run simultaneously, we introduce event aliases,
576 * ie the different events which have the same functionality but
577 * utilize non-intersected resources (ESCR/CCCR/counter registers).
578 *
579 * This allow us to relax restrictions a bit and run two or more
580 * identical events together.
581 *
582 * Never set any custom internal bits such as P4_CONFIG_HT,
583 * P4_CONFIG_ALIASABLE or bits for P4_PEBS_METRIC, they are
584 * either up to date automatically or not applicable at all.
585 */
586struct p4_event_alias {
587 u64 original;
588 u64 alternative;
589} p4_event_aliases[] = {
590 {
591 /*
592 * Non-halted cycles can be substituted with non-sleeping cycles (see
593 * Intel SDM Vol3b for details). We need this alias to be able
594 * to run nmi-watchdog and 'perf top' (or any other user space tool
595 * which is interested in running PERF_COUNT_HW_CPU_CYCLES)
596 * simultaneously.
597 */
598 .original =
599 p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_GLOBAL_POWER_EVENTS) |
600 P4_ESCR_EMASK_BIT(P4_EVENT_GLOBAL_POWER_EVENTS, RUNNING)),
601 .alternative =
602 p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_EXECUTION_EVENT) |
603 P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS0)|
604 P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS1)|
605 P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS2)|
606 P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS3)|
607 P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS0) |
608 P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS1) |
609 P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS2) |
610 P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS3))|
611 p4_config_pack_cccr(P4_CCCR_THRESHOLD(15) | P4_CCCR_COMPLEMENT |
612 P4_CCCR_COMPARE),
613 },
614};
615
616static u64 p4_get_alias_event(u64 config)
617{
618 u64 config_match;
619 int i;
620
621 /*
622 * Only event with special mark is allowed,
623 * we're to be sure it didn't come as malformed
624 * RAW event.
625 */
626 if (!(config & P4_CONFIG_ALIASABLE))
627 return 0;
628
629 config_match = config & P4_CONFIG_EVENT_ALIAS_MASK;
630
631 for (i = 0; i < ARRAY_SIZE(p4_event_aliases); i++) {
632 if (config_match == p4_event_aliases[i].original) {
633 config_match = p4_event_aliases[i].alternative;
634 break;
635 } else if (config_match == p4_event_aliases[i].alternative) {
636 config_match = p4_event_aliases[i].original;
637 break;
638 }
639 }
640
641 if (i >= ARRAY_SIZE(p4_event_aliases))
642 return 0;
643
644 return config_match | (config & P4_CONFIG_EVENT_ALIAS_IMMUTABLE_BITS);
645}
646
559static u64 p4_general_events[PERF_COUNT_HW_MAX] = { 647static u64 p4_general_events[PERF_COUNT_HW_MAX] = {
560 /* non-halted CPU clocks */ 648 /* non-halted CPU clocks */
561 [PERF_COUNT_HW_CPU_CYCLES] = 649 [PERF_COUNT_HW_CPU_CYCLES] =
562 p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_GLOBAL_POWER_EVENTS) | 650 p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_GLOBAL_POWER_EVENTS) |
563 P4_ESCR_EMASK_BIT(P4_EVENT_GLOBAL_POWER_EVENTS, RUNNING)), 651 P4_ESCR_EMASK_BIT(P4_EVENT_GLOBAL_POWER_EVENTS, RUNNING)) |
652 P4_CONFIG_ALIASABLE,
564 653
565 /* 654 /*
566 * retired instructions 655 * retired instructions
@@ -945,7 +1034,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
945 1034
946 if (!x86_perf_event_set_period(event)) 1035 if (!x86_perf_event_set_period(event))
947 continue; 1036 continue;
948 if (perf_event_overflow(event, 1, &data, regs)) 1037 if (perf_event_overflow(event, &data, regs))
949 x86_pmu_stop(event, 0); 1038 x86_pmu_stop(event, 0);
950 } 1039 }
951 1040
@@ -1120,6 +1209,8 @@ static int p4_pmu_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign
1120 struct p4_event_bind *bind; 1209 struct p4_event_bind *bind;
1121 unsigned int i, thread, num; 1210 unsigned int i, thread, num;
1122 int cntr_idx, escr_idx; 1211 int cntr_idx, escr_idx;
1212 u64 config_alias;
1213 int pass;
1123 1214
1124 bitmap_zero(used_mask, X86_PMC_IDX_MAX); 1215 bitmap_zero(used_mask, X86_PMC_IDX_MAX);
1125 bitmap_zero(escr_mask, P4_ESCR_MSR_TABLE_SIZE); 1216 bitmap_zero(escr_mask, P4_ESCR_MSR_TABLE_SIZE);
@@ -1128,6 +1219,17 @@ static int p4_pmu_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign
1128 1219
1129 hwc = &cpuc->event_list[i]->hw; 1220 hwc = &cpuc->event_list[i]->hw;
1130 thread = p4_ht_thread(cpu); 1221 thread = p4_ht_thread(cpu);
1222 pass = 0;
1223
1224again:
1225 /*
1226 * It's possible to hit a circular lock
1227 * between original and alternative events
1228 * if both are scheduled already.
1229 */
1230 if (pass > 2)
1231 goto done;
1232
1131 bind = p4_config_get_bind(hwc->config); 1233 bind = p4_config_get_bind(hwc->config);
1132 escr_idx = p4_get_escr_idx(bind->escr_msr[thread]); 1234 escr_idx = p4_get_escr_idx(bind->escr_msr[thread]);
1133 if (unlikely(escr_idx == -1)) 1235 if (unlikely(escr_idx == -1))
@@ -1141,8 +1243,17 @@ static int p4_pmu_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign
1141 } 1243 }
1142 1244
1143 cntr_idx = p4_next_cntr(thread, used_mask, bind); 1245 cntr_idx = p4_next_cntr(thread, used_mask, bind);
1144 if (cntr_idx == -1 || test_bit(escr_idx, escr_mask)) 1246 if (cntr_idx == -1 || test_bit(escr_idx, escr_mask)) {
1145 goto done; 1247 /*
1248 * Check whether an event alias is still available.
1249 */
1250 config_alias = p4_get_alias_event(hwc->config);
1251 if (!config_alias)
1252 goto done;
1253 hwc->config = config_alias;
1254 pass++;
1255 goto again;
1256 }
1146 1257
1147 p4_pmu_swap_config_ts(hwc, cpu); 1258 p4_pmu_swap_config_ts(hwc, cpu);
1148 if (assign) 1259 if (assign)
diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
index 9aeb78a23de4..a621f3427685 100644
--- a/arch/x86/kernel/devicetree.c
+++ b/arch/x86/kernel/devicetree.c
@@ -134,6 +134,24 @@ static int __init add_bus_probe(void)
134module_init(add_bus_probe); 134module_init(add_bus_probe);
135 135
136#ifdef CONFIG_PCI 136#ifdef CONFIG_PCI
137struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
138{
139 struct device_node *np;
140
141 for_each_node_by_type(np, "pci") {
142 const void *prop;
143 unsigned int bus_min;
144
145 prop = of_get_property(np, "bus-range", NULL);
146 if (!prop)
147 continue;
148 bus_min = be32_to_cpup(prop);
149 if (bus->number == bus_min)
150 return np;
151 }
152 return NULL;
153}
154
137static int x86_of_pci_irq_enable(struct pci_dev *dev) 155static int x86_of_pci_irq_enable(struct pci_dev *dev)
138{ 156{
139 struct of_irq oirq; 157 struct of_irq oirq;
@@ -165,50 +183,8 @@ static void x86_of_pci_irq_disable(struct pci_dev *dev)
165 183
166void __cpuinit x86_of_pci_init(void) 184void __cpuinit x86_of_pci_init(void)
167{ 185{
168 struct device_node *np;
169
170 pcibios_enable_irq = x86_of_pci_irq_enable; 186 pcibios_enable_irq = x86_of_pci_irq_enable;
171 pcibios_disable_irq = x86_of_pci_irq_disable; 187 pcibios_disable_irq = x86_of_pci_irq_disable;
172
173 for_each_node_by_type(np, "pci") {
174 const void *prop;
175 struct pci_bus *bus;
176 unsigned int bus_min;
177 struct device_node *child;
178
179 prop = of_get_property(np, "bus-range", NULL);
180 if (!prop)
181 continue;
182 bus_min = be32_to_cpup(prop);
183
184 bus = pci_find_bus(0, bus_min);
185 if (!bus) {
186 printk(KERN_ERR "Can't find a node for bus %s.\n",
187 np->full_name);
188 continue;
189 }
190
191 if (bus->self)
192 bus->self->dev.of_node = np;
193 else
194 bus->dev.of_node = np;
195
196 for_each_child_of_node(np, child) {
197 struct pci_dev *dev;
198 u32 devfn;
199
200 prop = of_get_property(child, "reg", NULL);
201 if (!prop)
202 continue;
203
204 devfn = (be32_to_cpup(prop) >> 8) & 0xff;
205 dev = pci_get_slot(bus, devfn);
206 if (!dev)
207 continue;
208 dev->dev.of_node = child;
209 pci_dev_put(dev);
210 }
211 }
212} 188}
213#endif 189#endif
214 190
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index e71c98d3c0d2..19853ad8afc5 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -105,34 +105,6 @@ in_irq_stack(unsigned long *stack, unsigned long *irq_stack,
105} 105}
106 106
107/* 107/*
108 * We are returning from the irq stack and go to the previous one.
109 * If the previous stack is also in the irq stack, then bp in the first
110 * frame of the irq stack points to the previous, interrupted one.
111 * Otherwise we have another level of indirection: We first save
112 * the bp of the previous stack, then we switch the stack to the irq one
113 * and save a new bp that links to the previous one.
114 * (See save_args())
115 */
116static inline unsigned long
117fixup_bp_irq_link(unsigned long bp, unsigned long *stack,
118 unsigned long *irq_stack, unsigned long *irq_stack_end)
119{
120#ifdef CONFIG_FRAME_POINTER
121 struct stack_frame *frame = (struct stack_frame *)bp;
122 unsigned long next;
123
124 if (!in_irq_stack(stack, irq_stack, irq_stack_end)) {
125 if (!probe_kernel_address(&frame->next_frame, next))
126 return next;
127 else
128 WARN_ONCE(1, "Perf: bad frame pointer = %p in "
129 "callchain\n", &frame->next_frame);
130 }
131#endif
132 return bp;
133}
134
135/*
136 * x86-64 can have up to three kernel stacks: 108 * x86-64 can have up to three kernel stacks:
137 * process stack 109 * process stack
138 * interrupt stack 110 * interrupt stack
@@ -155,9 +127,12 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
155 task = current; 127 task = current;
156 128
157 if (!stack) { 129 if (!stack) {
158 stack = &dummy; 130 if (regs)
159 if (task && task != current) 131 stack = (unsigned long *)regs->sp;
132 else if (task && task != current)
160 stack = (unsigned long *)task->thread.sp; 133 stack = (unsigned long *)task->thread.sp;
134 else
135 stack = &dummy;
161 } 136 }
162 137
163 if (!bp) 138 if (!bp)
@@ -205,8 +180,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
205 * pointer (index -1 to end) in the IRQ stack: 180 * pointer (index -1 to end) in the IRQ stack:
206 */ 181 */
207 stack = (unsigned long *) (irq_stack_end[-1]); 182 stack = (unsigned long *) (irq_stack_end[-1]);
208 bp = fixup_bp_irq_link(bp, stack, irq_stack,
209 irq_stack_end);
210 irq_stack_end = NULL; 183 irq_stack_end = NULL;
211 ops->stack(data, "EOI"); 184 ops->stack(data, "EOI");
212 continue; 185 continue;
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 8a445a0c989e..e13329d800c8 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -9,6 +9,8 @@
9/* 9/*
10 * entry.S contains the system-call and fault low-level handling routines. 10 * entry.S contains the system-call and fault low-level handling routines.
11 * 11 *
12 * Some of this is documented in Documentation/x86/entry_64.txt
13 *
12 * NOTE: This code handles signal-recognition, which happens every time 14 * NOTE: This code handles signal-recognition, which happens every time
13 * after an interrupt and after each system call. 15 * after an interrupt and after each system call.
14 * 16 *
@@ -297,27 +299,26 @@ ENDPROC(native_usergs_sysret64)
297 .endm 299 .endm
298 300
299/* save partial stack frame */ 301/* save partial stack frame */
300 .pushsection .kprobes.text, "ax" 302 .macro SAVE_ARGS_IRQ
301ENTRY(save_args)
302 XCPT_FRAME
303 cld 303 cld
304 /* 304 /* start from rbp in pt_regs and jump over */
305 * start from rbp in pt_regs and jump over 305 movq_cfi rdi, RDI-RBP
306 * return address. 306 movq_cfi rsi, RSI-RBP
307 */ 307 movq_cfi rdx, RDX-RBP
308 movq_cfi rdi, RDI+8-RBP 308 movq_cfi rcx, RCX-RBP
309 movq_cfi rsi, RSI+8-RBP 309 movq_cfi rax, RAX-RBP
310 movq_cfi rdx, RDX+8-RBP 310 movq_cfi r8, R8-RBP
311 movq_cfi rcx, RCX+8-RBP 311 movq_cfi r9, R9-RBP
312 movq_cfi rax, RAX+8-RBP 312 movq_cfi r10, R10-RBP
313 movq_cfi r8, R8+8-RBP 313 movq_cfi r11, R11-RBP
314 movq_cfi r9, R9+8-RBP 314
315 movq_cfi r10, R10+8-RBP 315 /* Save rbp so that we can unwind from get_irq_regs() */
316 movq_cfi r11, R11+8-RBP 316 movq_cfi rbp, 0
317 317
318 leaq -RBP+8(%rsp),%rdi /* arg1 for handler */ 318 /* Save previous stack value */
319 movq_cfi rbp, 8 /* push %rbp */ 319 movq %rsp, %rsi
320 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */ 320
321 leaq -RBP(%rsp),%rdi /* arg1 for handler */
321 testl $3, CS(%rdi) 322 testl $3, CS(%rdi)
322 je 1f 323 je 1f
323 SWAPGS 324 SWAPGS
@@ -329,19 +330,14 @@ ENTRY(save_args)
329 */ 330 */
3301: incl PER_CPU_VAR(irq_count) 3311: incl PER_CPU_VAR(irq_count)
331 jne 2f 332 jne 2f
332 popq_cfi %rax /* move return address... */
333 mov PER_CPU_VAR(irq_stack_ptr),%rsp 333 mov PER_CPU_VAR(irq_stack_ptr),%rsp
334 EMPTY_FRAME 0 334 EMPTY_FRAME 0
335 pushq_cfi %rbp /* backlink for unwinder */ 335
336 pushq_cfi %rax /* ... to the new stack */ 3362: /* Store previous stack value */
337 /* 337 pushq %rsi
338 * We entered an interrupt context - irqs are off: 338 /* We entered an interrupt context - irqs are off: */
339 */ 339 TRACE_IRQS_OFF
3402: TRACE_IRQS_OFF 340 .endm
341 ret
342 CFI_ENDPROC
343END(save_args)
344 .popsection
345 341
346ENTRY(save_rest) 342ENTRY(save_rest)
347 PARTIAL_FRAME 1 REST_SKIP+8 343 PARTIAL_FRAME 1 REST_SKIP+8
@@ -473,7 +469,7 @@ ENTRY(system_call_after_swapgs)
473 * and short: 469 * and short:
474 */ 470 */
475 ENABLE_INTERRUPTS(CLBR_NONE) 471 ENABLE_INTERRUPTS(CLBR_NONE)
476 SAVE_ARGS 8,1 472 SAVE_ARGS 8,0
477 movq %rax,ORIG_RAX-ARGOFFSET(%rsp) 473 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
478 movq %rcx,RIP-ARGOFFSET(%rsp) 474 movq %rcx,RIP-ARGOFFSET(%rsp)
479 CFI_REL_OFFSET rip,RIP-ARGOFFSET 475 CFI_REL_OFFSET rip,RIP-ARGOFFSET
@@ -508,7 +504,7 @@ sysret_check:
508 TRACE_IRQS_ON 504 TRACE_IRQS_ON
509 movq RIP-ARGOFFSET(%rsp),%rcx 505 movq RIP-ARGOFFSET(%rsp),%rcx
510 CFI_REGISTER rip,rcx 506 CFI_REGISTER rip,rcx
511 RESTORE_ARGS 0,-ARG_SKIP,1 507 RESTORE_ARGS 1,-ARG_SKIP,0
512 /*CFI_REGISTER rflags,r11*/ 508 /*CFI_REGISTER rflags,r11*/
513 movq PER_CPU_VAR(old_rsp), %rsp 509 movq PER_CPU_VAR(old_rsp), %rsp
514 USERGS_SYSRET64 510 USERGS_SYSRET64
@@ -791,7 +787,7 @@ END(interrupt)
791 /* reserve pt_regs for scratch regs and rbp */ 787 /* reserve pt_regs for scratch regs and rbp */
792 subq $ORIG_RAX-RBP, %rsp 788 subq $ORIG_RAX-RBP, %rsp
793 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP 789 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
794 call save_args 790 SAVE_ARGS_IRQ
795 PARTIAL_FRAME 0 791 PARTIAL_FRAME 0
796 call \func 792 call \func
797 .endm 793 .endm
@@ -814,15 +810,14 @@ ret_from_intr:
814 DISABLE_INTERRUPTS(CLBR_NONE) 810 DISABLE_INTERRUPTS(CLBR_NONE)
815 TRACE_IRQS_OFF 811 TRACE_IRQS_OFF
816 decl PER_CPU_VAR(irq_count) 812 decl PER_CPU_VAR(irq_count)
817 leaveq
818 813
819 CFI_RESTORE rbp 814 /* Restore saved previous stack */
815 popq %rsi
816 leaq 16(%rsi), %rsp
817
820 CFI_DEF_CFA_REGISTER rsp 818 CFI_DEF_CFA_REGISTER rsp
821 CFI_ADJUST_CFA_OFFSET -8 819 CFI_ADJUST_CFA_OFFSET -16
822 820
823 /* we did not save rbx, restore only from ARGOFFSET */
824 addq $8, %rsp
825 CFI_ADJUST_CFA_OFFSET -8
826exit_intr: 821exit_intr:
827 GET_THREAD_INFO(%rcx) 822 GET_THREAD_INFO(%rcx)
828 testl $3,CS-ARGOFFSET(%rsp) 823 testl $3,CS-ARGOFFSET(%rsp)
@@ -858,7 +853,7 @@ retint_restore_args: /* return to kernel space */
858 */ 853 */
859 TRACE_IRQS_IRETQ 854 TRACE_IRQS_IRETQ
860restore_args: 855restore_args:
861 RESTORE_ARGS 0,8,0 856 RESTORE_ARGS 1,8,1
862 857
863irq_return: 858irq_return:
864 INTERRUPT_RETURN 859 INTERRUPT_RETURN
@@ -991,11 +986,6 @@ apicinterrupt THRESHOLD_APIC_VECTOR \
991apicinterrupt THERMAL_APIC_VECTOR \ 986apicinterrupt THERMAL_APIC_VECTOR \
992 thermal_interrupt smp_thermal_interrupt 987 thermal_interrupt smp_thermal_interrupt
993 988
994#ifdef CONFIG_X86_MCE
995apicinterrupt MCE_SELF_VECTOR \
996 mce_self_interrupt smp_mce_self_interrupt
997#endif
998
999#ifdef CONFIG_SMP 989#ifdef CONFIG_SMP
1000apicinterrupt CALL_FUNCTION_SINGLE_VECTOR \ 990apicinterrupt CALL_FUNCTION_SINGLE_VECTOR \
1001 call_function_single_interrupt smp_call_function_single_interrupt 991 call_function_single_interrupt smp_call_function_single_interrupt
@@ -1121,6 +1111,8 @@ zeroentry spurious_interrupt_bug do_spurious_interrupt_bug
1121zeroentry coprocessor_error do_coprocessor_error 1111zeroentry coprocessor_error do_coprocessor_error
1122errorentry alignment_check do_alignment_check 1112errorentry alignment_check do_alignment_check
1123zeroentry simd_coprocessor_error do_simd_coprocessor_error 1113zeroentry simd_coprocessor_error do_simd_coprocessor_error
1114zeroentry emulate_vsyscall do_emulate_vsyscall
1115
1124 1116
1125 /* Reload gs selector with exception handling */ 1117 /* Reload gs selector with exception handling */
1126 /* edi: new selector */ 1118 /* edi: new selector */
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 6781765b3a0d..4aecc54236a9 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -4,6 +4,7 @@
4#include <linux/sysdev.h> 4#include <linux/sysdev.h>
5#include <linux/delay.h> 5#include <linux/delay.h>
6#include <linux/errno.h> 6#include <linux/errno.h>
7#include <linux/i8253.h>
7#include <linux/slab.h> 8#include <linux/slab.h>
8#include <linux/hpet.h> 9#include <linux/hpet.h>
9#include <linux/init.h> 10#include <linux/init.h>
@@ -12,8 +13,8 @@
12#include <linux/io.h> 13#include <linux/io.h>
13 14
14#include <asm/fixmap.h> 15#include <asm/fixmap.h>
15#include <asm/i8253.h>
16#include <asm/hpet.h> 16#include <asm/hpet.h>
17#include <asm/time.h>
17 18
18#define HPET_MASK CLOCKSOURCE_MASK(32) 19#define HPET_MASK CLOCKSOURCE_MASK(32)
19 20
@@ -71,7 +72,7 @@ static inline void hpet_set_mapping(void)
71{ 72{
72 hpet_virt_address = ioremap_nocache(hpet_address, HPET_MMAP_SIZE); 73 hpet_virt_address = ioremap_nocache(hpet_address, HPET_MMAP_SIZE);
73#ifdef CONFIG_X86_64 74#ifdef CONFIG_X86_64
74 __set_fixmap(VSYSCALL_HPET, hpet_address, PAGE_KERNEL_VSYSCALL_NOCACHE); 75 __set_fixmap(VSYSCALL_HPET, hpet_address, PAGE_KERNEL_VVAR_NOCACHE);
75#endif 76#endif
76} 77}
77 78
@@ -738,13 +739,6 @@ static cycle_t read_hpet(struct clocksource *cs)
738 return (cycle_t)hpet_readl(HPET_COUNTER); 739 return (cycle_t)hpet_readl(HPET_COUNTER);
739} 740}
740 741
741#ifdef CONFIG_X86_64
742static cycle_t __vsyscall_fn vread_hpet(void)
743{
744 return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
745}
746#endif
747
748static struct clocksource clocksource_hpet = { 742static struct clocksource clocksource_hpet = {
749 .name = "hpet", 743 .name = "hpet",
750 .rating = 250, 744 .rating = 250,
@@ -753,7 +747,7 @@ static struct clocksource clocksource_hpet = {
753 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 747 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
754 .resume = hpet_resume_counter, 748 .resume = hpet_resume_counter,
755#ifdef CONFIG_X86_64 749#ifdef CONFIG_X86_64
756 .vread = vread_hpet, 750 .archdata = { .vclock_mode = VCLOCK_HPET },
757#endif 751#endif
758}; 752};
759 753
diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c
index fb66dc9e36cb..f2b96de3c7c1 100644
--- a/arch/x86/kernel/i8253.c
+++ b/arch/x86/kernel/i8253.c
@@ -3,113 +3,24 @@
3 * 3 *
4 */ 4 */
5#include <linux/clockchips.h> 5#include <linux/clockchips.h>
6#include <linux/interrupt.h>
7#include <linux/spinlock.h>
8#include <linux/jiffies.h>
9#include <linux/module.h> 6#include <linux/module.h>
10#include <linux/timex.h> 7#include <linux/timex.h>
11#include <linux/delay.h> 8#include <linux/i8253.h>
12#include <linux/init.h>
13#include <linux/io.h>
14 9
15#include <asm/i8253.h>
16#include <asm/hpet.h> 10#include <asm/hpet.h>
11#include <asm/time.h>
17#include <asm/smp.h> 12#include <asm/smp.h>
18 13
19DEFINE_RAW_SPINLOCK(i8253_lock);
20EXPORT_SYMBOL(i8253_lock);
21
22/* 14/*
23 * HPET replaces the PIT, when enabled. So we need to know, which of 15 * HPET replaces the PIT, when enabled. So we need to know, which of
24 * the two timers is used 16 * the two timers is used
25 */ 17 */
26struct clock_event_device *global_clock_event; 18struct clock_event_device *global_clock_event;
27 19
28/*
29 * Initialize the PIT timer.
30 *
31 * This is also called after resume to bring the PIT into operation again.
32 */
33static void init_pit_timer(enum clock_event_mode mode,
34 struct clock_event_device *evt)
35{
36 raw_spin_lock(&i8253_lock);
37
38 switch (mode) {
39 case CLOCK_EVT_MODE_PERIODIC:
40 /* binary, mode 2, LSB/MSB, ch 0 */
41 outb_pit(0x34, PIT_MODE);
42 outb_pit(LATCH & 0xff , PIT_CH0); /* LSB */
43 outb_pit(LATCH >> 8 , PIT_CH0); /* MSB */
44 break;
45
46 case CLOCK_EVT_MODE_SHUTDOWN:
47 case CLOCK_EVT_MODE_UNUSED:
48 if (evt->mode == CLOCK_EVT_MODE_PERIODIC ||
49 evt->mode == CLOCK_EVT_MODE_ONESHOT) {
50 outb_pit(0x30, PIT_MODE);
51 outb_pit(0, PIT_CH0);
52 outb_pit(0, PIT_CH0);
53 }
54 break;
55
56 case CLOCK_EVT_MODE_ONESHOT:
57 /* One shot setup */
58 outb_pit(0x38, PIT_MODE);
59 break;
60
61 case CLOCK_EVT_MODE_RESUME:
62 /* Nothing to do here */
63 break;
64 }
65 raw_spin_unlock(&i8253_lock);
66}
67
68/*
69 * Program the next event in oneshot mode
70 *
71 * Delta is given in PIT ticks
72 */
73static int pit_next_event(unsigned long delta, struct clock_event_device *evt)
74{
75 raw_spin_lock(&i8253_lock);
76 outb_pit(delta & 0xff , PIT_CH0); /* LSB */
77 outb_pit(delta >> 8 , PIT_CH0); /* MSB */
78 raw_spin_unlock(&i8253_lock);
79
80 return 0;
81}
82
83/*
84 * On UP the PIT can serve all of the possible timer functions. On SMP systems
85 * it can be solely used for the global tick.
86 *
87 * The profiling and update capabilities are switched off once the local apic is
88 * registered. This mechanism replaces the previous #ifdef LOCAL_APIC -
89 * !using_apic_timer decisions in do_timer_interrupt_hook()
90 */
91static struct clock_event_device pit_ce = {
92 .name = "pit",
93 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
94 .set_mode = init_pit_timer,
95 .set_next_event = pit_next_event,
96 .irq = 0,
97};
98
99/*
100 * Initialize the conversion factor and the min/max deltas of the clock event
101 * structure and register the clock event source with the framework.
102 */
103void __init setup_pit_timer(void) 20void __init setup_pit_timer(void)
104{ 21{
105 /* 22 clockevent_i8253_init(true);
106 * Start pit with the boot cpu mask and make it global after the 23 global_clock_event = &i8253_clockevent;
107 * IO_APIC has been initialized.
108 */
109 pit_ce.cpumask = cpumask_of(smp_processor_id());
110
111 clockevents_config_and_register(&pit_ce, CLOCK_TICK_RATE, 0xF, 0x7FFF);
112 global_clock_event = &pit_ce;
113} 24}
114 25
115#ifndef CONFIG_X86_64 26#ifndef CONFIG_X86_64
@@ -123,7 +34,7 @@ static int __init init_pit_clocksource(void)
123 * - when local APIC timer is active (PIT is switched off) 34 * - when local APIC timer is active (PIT is switched off)
124 */ 35 */
125 if (num_possible_cpus() > 1 || is_hpet_enabled() || 36 if (num_possible_cpus() > 1 || is_hpet_enabled() ||
126 pit_ce.mode != CLOCK_EVT_MODE_PERIODIC) 37 i8253_clockevent.mode != CLOCK_EVT_MODE_PERIODIC)
127 return 0; 38 return 0;
128 39
129 return clocksource_i8253_init(); 40 return clocksource_i8253_init();
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index f470e4ef993e..f09d4bbe2d2d 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -272,9 +272,6 @@ static void __init apic_intr_init(void)
272#ifdef CONFIG_X86_MCE_THRESHOLD 272#ifdef CONFIG_X86_MCE_THRESHOLD
273 alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt); 273 alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt);
274#endif 274#endif
275#if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_LOCAL_APIC)
276 alloc_intr_gate(MCE_SELF_VECTOR, mce_self_interrupt);
277#endif
278 275
279#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC) 276#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
280 /* self generated IPI for local APIC timer */ 277 /* self generated IPI for local APIC timer */
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index 5f9ecff328b5..00354d4919a9 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -608,7 +608,7 @@ int kgdb_arch_init(void)
608 return register_die_notifier(&kgdb_notifier); 608 return register_die_notifier(&kgdb_notifier);
609} 609}
610 610
611static void kgdb_hw_overflow_handler(struct perf_event *event, int nmi, 611static void kgdb_hw_overflow_handler(struct perf_event *event,
612 struct perf_sample_data *data, struct pt_regs *regs) 612 struct perf_sample_data *data, struct pt_regs *regs)
613{ 613{
614 struct task_struct *tsk = current; 614 struct task_struct *tsk = current;
@@ -638,7 +638,7 @@ void kgdb_arch_late(void)
638 for (i = 0; i < HBP_NUM; i++) { 638 for (i = 0; i < HBP_NUM; i++) {
639 if (breakinfo[i].pev) 639 if (breakinfo[i].pev)
640 continue; 640 continue;
641 breakinfo[i].pev = register_wide_hw_breakpoint(&attr, NULL); 641 breakinfo[i].pev = register_wide_hw_breakpoint(&attr, NULL, NULL);
642 if (IS_ERR((void * __force)breakinfo[i].pev)) { 642 if (IS_ERR((void * __force)breakinfo[i].pev)) {
643 printk(KERN_ERR "kgdb: Could not allocate hw" 643 printk(KERN_ERR "kgdb: Could not allocate hw"
644 "breakpoints\nDisabling the kernel debugger\n"); 644 "breakpoints\nDisabling the kernel debugger\n");
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index c5610384ab16..591be0ee1934 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -66,8 +66,8 @@ struct microcode_amd {
66 unsigned int mpb[0]; 66 unsigned int mpb[0];
67}; 67};
68 68
69#define UCODE_CONTAINER_SECTION_HDR 8 69#define SECTION_HDR_SIZE 8
70#define UCODE_CONTAINER_HEADER_SIZE 12 70#define CONTAINER_HDR_SZ 12
71 71
72static struct equiv_cpu_entry *equiv_cpu_table; 72static struct equiv_cpu_entry *equiv_cpu_table;
73 73
@@ -157,7 +157,7 @@ static int apply_microcode_amd(int cpu)
157static unsigned int verify_ucode_size(int cpu, const u8 *buf, unsigned int size) 157static unsigned int verify_ucode_size(int cpu, const u8 *buf, unsigned int size)
158{ 158{
159 struct cpuinfo_x86 *c = &cpu_data(cpu); 159 struct cpuinfo_x86 *c = &cpu_data(cpu);
160 unsigned int max_size, actual_size; 160 u32 max_size, actual_size;
161 161
162#define F1XH_MPB_MAX_SIZE 2048 162#define F1XH_MPB_MAX_SIZE 2048
163#define F14H_MPB_MAX_SIZE 1824 163#define F14H_MPB_MAX_SIZE 1824
@@ -175,9 +175,9 @@ static unsigned int verify_ucode_size(int cpu, const u8 *buf, unsigned int size)
175 break; 175 break;
176 } 176 }
177 177
178 actual_size = buf[4] + (buf[5] << 8); 178 actual_size = *(u32 *)(buf + 4);
179 179
180 if (actual_size > size || actual_size > max_size) { 180 if (actual_size + SECTION_HDR_SIZE > size || actual_size > max_size) {
181 pr_err("section size mismatch\n"); 181 pr_err("section size mismatch\n");
182 return 0; 182 return 0;
183 } 183 }
@@ -191,7 +191,7 @@ get_next_ucode(int cpu, const u8 *buf, unsigned int size, unsigned int *mc_size)
191 struct microcode_header_amd *mc = NULL; 191 struct microcode_header_amd *mc = NULL;
192 unsigned int actual_size = 0; 192 unsigned int actual_size = 0;
193 193
194 if (buf[0] != UCODE_UCODE_TYPE) { 194 if (*(u32 *)buf != UCODE_UCODE_TYPE) {
195 pr_err("invalid type field in container file section header\n"); 195 pr_err("invalid type field in container file section header\n");
196 goto out; 196 goto out;
197 } 197 }
@@ -204,8 +204,8 @@ get_next_ucode(int cpu, const u8 *buf, unsigned int size, unsigned int *mc_size)
204 if (!mc) 204 if (!mc)
205 goto out; 205 goto out;
206 206
207 get_ucode_data(mc, buf + UCODE_CONTAINER_SECTION_HDR, actual_size); 207 get_ucode_data(mc, buf + SECTION_HDR_SIZE, actual_size);
208 *mc_size = actual_size + UCODE_CONTAINER_SECTION_HDR; 208 *mc_size = actual_size + SECTION_HDR_SIZE;
209 209
210out: 210out:
211 return mc; 211 return mc;
@@ -229,9 +229,10 @@ static int install_equiv_cpu_table(const u8 *buf)
229 return -ENOMEM; 229 return -ENOMEM;
230 } 230 }
231 231
232 get_ucode_data(equiv_cpu_table, buf + UCODE_CONTAINER_HEADER_SIZE, size); 232 get_ucode_data(equiv_cpu_table, buf + CONTAINER_HDR_SZ, size);
233 233
234 return size + UCODE_CONTAINER_HEADER_SIZE; /* add header length */ 234 /* add header length */
235 return size + CONTAINER_HDR_SZ;
235} 236}
236 237
237static void free_equiv_cpu_table(void) 238static void free_equiv_cpu_table(void)
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 807c2a2b80f1..82528799c5de 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -528,7 +528,7 @@ static int genregs_set(struct task_struct *target,
528 return ret; 528 return ret;
529} 529}
530 530
531static void ptrace_triggered(struct perf_event *bp, int nmi, 531static void ptrace_triggered(struct perf_event *bp,
532 struct perf_sample_data *data, 532 struct perf_sample_data *data,
533 struct pt_regs *regs) 533 struct pt_regs *regs)
534{ 534{
@@ -715,7 +715,8 @@ static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr,
715 attr.bp_type = HW_BREAKPOINT_W; 715 attr.bp_type = HW_BREAKPOINT_W;
716 attr.disabled = 1; 716 attr.disabled = 1;
717 717
718 bp = register_user_hw_breakpoint(&attr, ptrace_triggered, tsk); 718 bp = register_user_hw_breakpoint(&attr, ptrace_triggered,
719 NULL, tsk);
719 720
720 /* 721 /*
721 * CHECKME: the previous code returned -EIO if the addr wasn't 722 * CHECKME: the previous code returned -EIO if the addr wasn't
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index 8bbe8c56916d..b78643d0f9a5 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -10,7 +10,7 @@
10 10
11static void __devinit quirk_intel_irqbalance(struct pci_dev *dev) 11static void __devinit quirk_intel_irqbalance(struct pci_dev *dev)
12{ 12{
13 u8 config, rev; 13 u8 config;
14 u16 word; 14 u16 word;
15 15
16 /* BIOS may enable hardware IRQ balancing for 16 /* BIOS may enable hardware IRQ balancing for
@@ -18,8 +18,7 @@ static void __devinit quirk_intel_irqbalance(struct pci_dev *dev)
18 * based platforms. 18 * based platforms.
19 * Disable SW irqbalance/affinity on those platforms. 19 * Disable SW irqbalance/affinity on those platforms.
20 */ 20 */
21 pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev); 21 if (dev->revision > 0x9)
22 if (rev > 0x9)
23 return; 22 return;
24 23
25 /* enable access to config space*/ 24 /* enable access to config space*/
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 0c016f727695..9242436e9937 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -294,6 +294,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
294 DMI_MATCH(DMI_BOARD_NAME, "VersaLogic Menlow board"), 294 DMI_MATCH(DMI_BOARD_NAME, "VersaLogic Menlow board"),
295 }, 295 },
296 }, 296 },
297 { /* Handle reboot issue on Acer Aspire one */
298 .callback = set_bios_reboot,
299 .ident = "Acer Aspire One A110",
300 .matches = {
301 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
302 DMI_MATCH(DMI_PRODUCT_NAME, "AOA110"),
303 },
304 },
297 { } 305 { }
298}; 306};
299 307
@@ -411,6 +419,30 @@ static struct dmi_system_id __initdata pci_reboot_dmi_table[] = {
411 DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1"), 419 DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1"),
412 }, 420 },
413 }, 421 },
422 { /* Handle problems with rebooting on the Latitude E6320. */
423 .callback = set_pci_reboot,
424 .ident = "Dell Latitude E6320",
425 .matches = {
426 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
427 DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6320"),
428 },
429 },
430 { /* Handle problems with rebooting on the Latitude E5420. */
431 .callback = set_pci_reboot,
432 .ident = "Dell Latitude E5420",
433 .matches = {
434 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
435 DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E5420"),
436 },
437 },
438 { /* Handle problems with rebooting on the Latitude E6420. */
439 .callback = set_pci_reboot,
440 .ident = "Dell Latitude E6420",
441 .matches = {
442 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
443 DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6420"),
444 },
445 },
414 { } 446 { }
415}; 447};
416 448
diff --git a/arch/x86/kernel/relocate_kernel_32.S b/arch/x86/kernel/relocate_kernel_32.S
index 41235531b11c..36818f8ec2be 100644
--- a/arch/x86/kernel/relocate_kernel_32.S
+++ b/arch/x86/kernel/relocate_kernel_32.S
@@ -97,6 +97,8 @@ relocate_kernel:
97 ret 97 ret
98 98
99identity_mapped: 99identity_mapped:
100 /* set return address to 0 if not preserving context */
101 pushl $0
100 /* store the start address on the stack */ 102 /* store the start address on the stack */
101 pushl %edx 103 pushl %edx
102 104
diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
index 4de8f5b3d476..7a6f3b3be3cf 100644
--- a/arch/x86/kernel/relocate_kernel_64.S
+++ b/arch/x86/kernel/relocate_kernel_64.S
@@ -100,6 +100,8 @@ relocate_kernel:
100 ret 100 ret
101 101
102identity_mapped: 102identity_mapped:
103 /* set return address to 0 if not preserving context */
104 pushq $0
103 /* store the start address on the stack */ 105 /* store the start address on the stack */
104 pushq %rdx 106 pushq %rdx
105 107
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 40a24932a8a1..54ddaeb221c1 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -485,17 +485,18 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
485asmlinkage int 485asmlinkage int
486sys_sigsuspend(int history0, int history1, old_sigset_t mask) 486sys_sigsuspend(int history0, int history1, old_sigset_t mask)
487{ 487{
488 mask &= _BLOCKABLE; 488 sigset_t blocked;
489 spin_lock_irq(&current->sighand->siglock); 489
490 current->saved_sigmask = current->blocked; 490 current->saved_sigmask = current->blocked;
491 siginitset(&current->blocked, mask); 491
492 recalc_sigpending(); 492 mask &= _BLOCKABLE;
493 spin_unlock_irq(&current->sighand->siglock); 493 siginitset(&blocked, mask);
494 set_current_blocked(&blocked);
494 495
495 current->state = TASK_INTERRUPTIBLE; 496 current->state = TASK_INTERRUPTIBLE;
496 schedule(); 497 schedule();
497 set_restore_sigmask();
498 498
499 set_restore_sigmask();
499 return -ERESTARTNOHAND; 500 return -ERESTARTNOHAND;
500} 501}
501 502
@@ -572,10 +573,7 @@ unsigned long sys_sigreturn(struct pt_regs *regs)
572 goto badframe; 573 goto badframe;
573 574
574 sigdelsetmask(&set, ~_BLOCKABLE); 575 sigdelsetmask(&set, ~_BLOCKABLE);
575 spin_lock_irq(&current->sighand->siglock); 576 set_current_blocked(&set);
576 current->blocked = set;
577 recalc_sigpending();
578 spin_unlock_irq(&current->sighand->siglock);
579 577
580 if (restore_sigcontext(regs, &frame->sc, &ax)) 578 if (restore_sigcontext(regs, &frame->sc, &ax))
581 goto badframe; 579 goto badframe;
@@ -653,11 +651,15 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
653 651
654static int 652static int
655setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, 653setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
656 sigset_t *set, struct pt_regs *regs) 654 struct pt_regs *regs)
657{ 655{
658 int usig = signr_convert(sig); 656 int usig = signr_convert(sig);
657 sigset_t *set = &current->blocked;
659 int ret; 658 int ret;
660 659
660 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
661 set = &current->saved_sigmask;
662
661 /* Set up the stack frame */ 663 /* Set up the stack frame */
662 if (is_ia32) { 664 if (is_ia32) {
663 if (ka->sa.sa_flags & SA_SIGINFO) 665 if (ka->sa.sa_flags & SA_SIGINFO)
@@ -672,12 +674,13 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
672 return -EFAULT; 674 return -EFAULT;
673 } 675 }
674 676
677 current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
675 return ret; 678 return ret;
676} 679}
677 680
678static int 681static int
679handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, 682handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
680 sigset_t *oldset, struct pt_regs *regs) 683 struct pt_regs *regs)
681{ 684{
682 sigset_t blocked; 685 sigset_t blocked;
683 int ret; 686 int ret;
@@ -712,20 +715,11 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
712 likely(test_and_clear_thread_flag(TIF_FORCED_TF))) 715 likely(test_and_clear_thread_flag(TIF_FORCED_TF)))
713 regs->flags &= ~X86_EFLAGS_TF; 716 regs->flags &= ~X86_EFLAGS_TF;
714 717
715 ret = setup_rt_frame(sig, ka, info, oldset, regs); 718 ret = setup_rt_frame(sig, ka, info, regs);
716 719
717 if (ret) 720 if (ret)
718 return ret; 721 return ret;
719 722
720#ifdef CONFIG_X86_64
721 /*
722 * This has nothing to do with segment registers,
723 * despite the name. This magic affects uaccess.h
724 * macros' behavior. Reset it to the normal setting.
725 */
726 set_fs(USER_DS);
727#endif
728
729 /* 723 /*
730 * Clear the direction flag as per the ABI for function entry. 724 * Clear the direction flag as per the ABI for function entry.
731 */ 725 */
@@ -767,7 +761,6 @@ static void do_signal(struct pt_regs *regs)
767 struct k_sigaction ka; 761 struct k_sigaction ka;
768 siginfo_t info; 762 siginfo_t info;
769 int signr; 763 int signr;
770 sigset_t *oldset;
771 764
772 /* 765 /*
773 * We want the common case to go fast, which is why we may in certain 766 * We want the common case to go fast, which is why we may in certain
@@ -779,23 +772,10 @@ static void do_signal(struct pt_regs *regs)
779 if (!user_mode(regs)) 772 if (!user_mode(regs))
780 return; 773 return;
781 774
782 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
783 oldset = &current->saved_sigmask;
784 else
785 oldset = &current->blocked;
786
787 signr = get_signal_to_deliver(&info, &ka, regs, NULL); 775 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
788 if (signr > 0) { 776 if (signr > 0) {
789 /* Whee! Actually deliver the signal. */ 777 /* Whee! Actually deliver the signal. */
790 if (handle_signal(signr, &info, &ka, oldset, regs) == 0) { 778 handle_signal(signr, &info, &ka, regs);
791 /*
792 * A signal was successfully delivered; the saved
793 * sigmask will have been stored in the signal frame,
794 * and will be restored by sigreturn, so we can simply
795 * clear the TS_RESTORE_SIGMASK flag.
796 */
797 current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
798 }
799 return; 779 return;
800 } 780 }
801 781
@@ -823,7 +803,7 @@ static void do_signal(struct pt_regs *regs)
823 */ 803 */
824 if (current_thread_info()->status & TS_RESTORE_SIGMASK) { 804 if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
825 current_thread_info()->status &= ~TS_RESTORE_SIGMASK; 805 current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
826 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); 806 set_current_blocked(&current->saved_sigmask);
827 } 807 }
828} 808}
829 809
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 9fd3137230d4..9f548cb4a958 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -438,7 +438,7 @@ static void impress_friends(void)
438void __inquire_remote_apic(int apicid) 438void __inquire_remote_apic(int apicid)
439{ 439{
440 unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 }; 440 unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
441 char *names[] = { "ID", "VERSION", "SPIV" }; 441 const char * const names[] = { "ID", "VERSION", "SPIV" };
442 int timeout; 442 int timeout;
443 u32 status; 443 u32 status;
444 444
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
index 55d9bc03f696..fdd0c6430e5a 100644
--- a/arch/x86/kernel/stacktrace.c
+++ b/arch/x86/kernel/stacktrace.c
@@ -66,7 +66,7 @@ void save_stack_trace(struct stack_trace *trace)
66} 66}
67EXPORT_SYMBOL_GPL(save_stack_trace); 67EXPORT_SYMBOL_GPL(save_stack_trace);
68 68
69void save_stack_trace_regs(struct stack_trace *trace, struct pt_regs *regs) 69void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
70{ 70{
71 dump_trace(current, regs, NULL, 0, &save_stack_ops, trace); 71 dump_trace(current, regs, NULL, 0, &save_stack_ops, trace);
72 if (trace->nr_entries < trace->max_entries) 72 if (trace->nr_entries < trace->max_entries)
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
index 30ac65df7d4e..e07a2fc876b9 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -36,6 +36,7 @@
36#include <asm/bootparam.h> 36#include <asm/bootparam.h>
37#include <asm/pgtable.h> 37#include <asm/pgtable.h>
38#include <asm/pgalloc.h> 38#include <asm/pgalloc.h>
39#include <asm/swiotlb.h>
39#include <asm/fixmap.h> 40#include <asm/fixmap.h>
40#include <asm/proto.h> 41#include <asm/proto.h>
41#include <asm/setup.h> 42#include <asm/setup.h>
diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
index 00cbb272627f..5a64d057be57 100644
--- a/arch/x86/kernel/time.c
+++ b/arch/x86/kernel/time.c
@@ -11,13 +11,13 @@
11 11
12#include <linux/clockchips.h> 12#include <linux/clockchips.h>
13#include <linux/interrupt.h> 13#include <linux/interrupt.h>
14#include <linux/i8253.h>
14#include <linux/time.h> 15#include <linux/time.h>
15#include <linux/mca.h> 16#include <linux/mca.h>
16 17
17#include <asm/vsyscall.h> 18#include <asm/vsyscall.h>
18#include <asm/x86_init.h> 19#include <asm/x86_init.h>
19#include <asm/i8259.h> 20#include <asm/i8259.h>
20#include <asm/i8253.h>
21#include <asm/timer.h> 21#include <asm/timer.h>
22#include <asm/hpet.h> 22#include <asm/hpet.h>
23#include <asm/time.h> 23#include <asm/time.h>
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index b9b67166f9de..fbc097a085ca 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -872,6 +872,12 @@ void __init trap_init(void)
872 set_bit(SYSCALL_VECTOR, used_vectors); 872 set_bit(SYSCALL_VECTOR, used_vectors);
873#endif 873#endif
874 874
875#ifdef CONFIG_X86_64
876 BUG_ON(test_bit(VSYSCALL_EMU_VECTOR, used_vectors));
877 set_system_intr_gate(VSYSCALL_EMU_VECTOR, &emulate_vsyscall);
878 set_bit(VSYSCALL_EMU_VECTOR, used_vectors);
879#endif
880
875 /* 881 /*
876 * Should be a barrier for any external CPU state: 882 * Should be a barrier for any external CPU state:
877 */ 883 */
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 6cc6922262af..db483369f10b 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -5,7 +5,6 @@
5#include <linux/timer.h> 5#include <linux/timer.h>
6#include <linux/acpi_pmtmr.h> 6#include <linux/acpi_pmtmr.h>
7#include <linux/cpufreq.h> 7#include <linux/cpufreq.h>
8#include <linux/dmi.h>
9#include <linux/delay.h> 8#include <linux/delay.h>
10#include <linux/clocksource.h> 9#include <linux/clocksource.h>
11#include <linux/percpu.h> 10#include <linux/percpu.h>
@@ -777,7 +776,7 @@ static struct clocksource clocksource_tsc = {
777 .flags = CLOCK_SOURCE_IS_CONTINUOUS | 776 .flags = CLOCK_SOURCE_IS_CONTINUOUS |
778 CLOCK_SOURCE_MUST_VERIFY, 777 CLOCK_SOURCE_MUST_VERIFY,
779#ifdef CONFIG_X86_64 778#ifdef CONFIG_X86_64
780 .vread = vread_tsc, 779 .archdata = { .vclock_mode = VCLOCK_TSC },
781#endif 780#endif
782}; 781};
783 782
@@ -800,27 +799,6 @@ void mark_tsc_unstable(char *reason)
800 799
801EXPORT_SYMBOL_GPL(mark_tsc_unstable); 800EXPORT_SYMBOL_GPL(mark_tsc_unstable);
802 801
803static int __init dmi_mark_tsc_unstable(const struct dmi_system_id *d)
804{
805 printk(KERN_NOTICE "%s detected: marking TSC unstable.\n",
806 d->ident);
807 tsc_unstable = 1;
808 return 0;
809}
810
811/* List of systems that have known TSC problems */
812static struct dmi_system_id __initdata bad_tsc_dmi_table[] = {
813 {
814 .callback = dmi_mark_tsc_unstable,
815 .ident = "IBM Thinkpad 380XD",
816 .matches = {
817 DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
818 DMI_MATCH(DMI_BOARD_NAME, "2635FA0"),
819 },
820 },
821 {}
822};
823
824static void __init check_system_tsc_reliable(void) 802static void __init check_system_tsc_reliable(void)
825{ 803{
826#ifdef CONFIG_MGEODE_LX 804#ifdef CONFIG_MGEODE_LX
@@ -1010,8 +988,6 @@ void __init tsc_init(void)
1010 lpj_fine = lpj; 988 lpj_fine = lpj;
1011 989
1012 use_tsc_delay(); 990 use_tsc_delay();
1013 /* Check and install the TSC clocksource */
1014 dmi_check_system(bad_tsc_dmi_table);
1015 991
1016 if (unsynchronized_tsc()) 992 if (unsynchronized_tsc())
1017 mark_tsc_unstable("TSCs unsynchronized"); 993 mark_tsc_unstable("TSCs unsynchronized");
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 89aed99aafce..4aa9c54a9b76 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -161,50 +161,47 @@ SECTIONS
161 161
162#define VVIRT_OFFSET (VSYSCALL_ADDR - __vsyscall_0) 162#define VVIRT_OFFSET (VSYSCALL_ADDR - __vsyscall_0)
163#define VVIRT(x) (ADDR(x) - VVIRT_OFFSET) 163#define VVIRT(x) (ADDR(x) - VVIRT_OFFSET)
164#define EMIT_VVAR(x, offset) .vsyscall_var_ ## x \
165 ADDR(.vsyscall_0) + offset \
166 : AT(VLOAD(.vsyscall_var_ ## x)) { \
167 *(.vsyscall_var_ ## x) \
168 } \
169 x = VVIRT(.vsyscall_var_ ## x);
170 164
171 . = ALIGN(4096); 165 . = ALIGN(4096);
172 __vsyscall_0 = .; 166 __vsyscall_0 = .;
173 167
174 . = VSYSCALL_ADDR; 168 . = VSYSCALL_ADDR;
175 .vsyscall_0 : AT(VLOAD(.vsyscall_0)) { 169 .vsyscall : AT(VLOAD(.vsyscall)) {
176 *(.vsyscall_0) 170 *(.vsyscall_0)
177 } :user
178 171
179 . = ALIGN(L1_CACHE_BYTES); 172 . = 1024;
180 .vsyscall_fn : AT(VLOAD(.vsyscall_fn)) {
181 *(.vsyscall_fn)
182 }
183
184 .vsyscall_1 ADDR(.vsyscall_0) + 1024: AT(VLOAD(.vsyscall_1)) {
185 *(.vsyscall_1) 173 *(.vsyscall_1)
186 }
187 .vsyscall_2 ADDR(.vsyscall_0) + 2048: AT(VLOAD(.vsyscall_2)) {
188 *(.vsyscall_2)
189 }
190 174
191 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) { 175 . = 2048;
192 *(.vsyscall_3) 176 *(.vsyscall_2)
193 }
194
195#define __VVAR_KERNEL_LDS
196#include <asm/vvar.h>
197#undef __VVAR_KERNEL_LDS
198 177
199 . = __vsyscall_0 + PAGE_SIZE; 178 . = 4096; /* Pad the whole page. */
179 } :user =0xcc
180 . = ALIGN(__vsyscall_0 + PAGE_SIZE, PAGE_SIZE);
200 181
201#undef VSYSCALL_ADDR 182#undef VSYSCALL_ADDR
202#undef VLOAD_OFFSET 183#undef VLOAD_OFFSET
203#undef VLOAD 184#undef VLOAD
204#undef VVIRT_OFFSET 185#undef VVIRT_OFFSET
205#undef VVIRT 186#undef VVIRT
187
188 __vvar_page = .;
189
190 .vvar : AT(ADDR(.vvar) - LOAD_OFFSET) {
191
192 /* Place all vvars at the offsets in asm/vvar.h. */
193#define EMIT_VVAR(name, offset) \
194 . = offset; \
195 *(.vvar_ ## name)
196#define __VVAR_KERNEL_LDS
197#include <asm/vvar.h>
198#undef __VVAR_KERNEL_LDS
206#undef EMIT_VVAR 199#undef EMIT_VVAR
207 200
201 } :data
202
203 . = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE);
204
208#endif /* CONFIG_X86_64 */ 205#endif /* CONFIG_X86_64 */
209 206
210 /* Init code and data - will be freed after init */ 207 /* Init code and data - will be freed after init */
diff --git a/arch/x86/kernel/vread_tsc_64.c b/arch/x86/kernel/vread_tsc_64.c
deleted file mode 100644
index a81aa9e9894c..000000000000
--- a/arch/x86/kernel/vread_tsc_64.c
+++ /dev/null
@@ -1,36 +0,0 @@
1/* This code runs in userspace. */
2
3#define DISABLE_BRANCH_PROFILING
4#include <asm/vgtod.h>
5
6notrace cycle_t __vsyscall_fn vread_tsc(void)
7{
8 cycle_t ret;
9 u64 last;
10
11 /*
12 * Empirically, a fence (of type that depends on the CPU)
13 * before rdtsc is enough to ensure that rdtsc is ordered
14 * with respect to loads. The various CPU manuals are unclear
15 * as to whether rdtsc can be reordered with later loads,
16 * but no one has ever seen it happen.
17 */
18 rdtsc_barrier();
19 ret = (cycle_t)vget_cycles();
20
21 last = VVAR(vsyscall_gtod_data).clock.cycle_last;
22
23 if (likely(ret >= last))
24 return ret;
25
26 /*
27 * GCC likes to generate cmov here, but this branch is extremely
28 * predictable (it's just a funciton of time and the likely is
29 * very likely) and there's a data dependence, so force GCC
30 * to generate a branch instead. I don't barrier() because
31 * we don't actually need a barrier, and if this function
32 * ever gets inlined it will generate worse code.
33 */
34 asm volatile ("");
35 return last;
36}
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index 3e682184d76c..dda7dff9cef7 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -2,6 +2,8 @@
2 * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE 2 * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
3 * Copyright 2003 Andi Kleen, SuSE Labs. 3 * Copyright 2003 Andi Kleen, SuSE Labs.
4 * 4 *
5 * [ NOTE: this mechanism is now deprecated in favor of the vDSO. ]
6 *
5 * Thanks to hpa@transmeta.com for some useful hint. 7 * Thanks to hpa@transmeta.com for some useful hint.
6 * Special thanks to Ingo Molnar for his early experience with 8 * Special thanks to Ingo Molnar for his early experience with
7 * a different vsyscall implementation for Linux/IA32 and for the name. 9 * a different vsyscall implementation for Linux/IA32 and for the name.
@@ -11,10 +13,9 @@
11 * vsyscalls. One vsyscall can reserve more than 1 slot to avoid 13 * vsyscalls. One vsyscall can reserve more than 1 slot to avoid
12 * jumping out of line if necessary. We cannot add more with this 14 * jumping out of line if necessary. We cannot add more with this
13 * mechanism because older kernels won't return -ENOSYS. 15 * mechanism because older kernels won't return -ENOSYS.
14 * If we want more than four we need a vDSO.
15 * 16 *
16 * Note: the concept clashes with user mode linux. If you use UML and 17 * Note: the concept clashes with user mode linux. UML users should
17 * want per guest time just set the kernel.vsyscall64 sysctl to 0. 18 * use the vDSO.
18 */ 19 */
19 20
20/* Disable profiling for userspace code: */ 21/* Disable profiling for userspace code: */
@@ -32,9 +33,12 @@
32#include <linux/cpu.h> 33#include <linux/cpu.h>
33#include <linux/smp.h> 34#include <linux/smp.h>
34#include <linux/notifier.h> 35#include <linux/notifier.h>
36#include <linux/syscalls.h>
37#include <linux/ratelimit.h>
35 38
36#include <asm/vsyscall.h> 39#include <asm/vsyscall.h>
37#include <asm/pgtable.h> 40#include <asm/pgtable.h>
41#include <asm/compat.h>
38#include <asm/page.h> 42#include <asm/page.h>
39#include <asm/unistd.h> 43#include <asm/unistd.h>
40#include <asm/fixmap.h> 44#include <asm/fixmap.h>
@@ -44,16 +48,12 @@
44#include <asm/desc.h> 48#include <asm/desc.h>
45#include <asm/topology.h> 49#include <asm/topology.h>
46#include <asm/vgtod.h> 50#include <asm/vgtod.h>
47 51#include <asm/traps.h>
48#define __vsyscall(nr) \
49 __attribute__ ((unused, __section__(".vsyscall_" #nr))) notrace
50#define __syscall_clobber "r11","cx","memory"
51 52
52DEFINE_VVAR(int, vgetcpu_mode); 53DEFINE_VVAR(int, vgetcpu_mode);
53DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) = 54DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
54{ 55{
55 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock), 56 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
56 .sysctl_enabled = 1,
57}; 57};
58 58
59void update_vsyscall_tz(void) 59void update_vsyscall_tz(void)
@@ -72,179 +72,149 @@ void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
72 unsigned long flags; 72 unsigned long flags;
73 73
74 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags); 74 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
75
75 /* copy vsyscall data */ 76 /* copy vsyscall data */
76 vsyscall_gtod_data.clock.vread = clock->vread; 77 vsyscall_gtod_data.clock.vclock_mode = clock->archdata.vclock_mode;
77 vsyscall_gtod_data.clock.cycle_last = clock->cycle_last; 78 vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
78 vsyscall_gtod_data.clock.mask = clock->mask; 79 vsyscall_gtod_data.clock.mask = clock->mask;
79 vsyscall_gtod_data.clock.mult = mult; 80 vsyscall_gtod_data.clock.mult = mult;
80 vsyscall_gtod_data.clock.shift = clock->shift; 81 vsyscall_gtod_data.clock.shift = clock->shift;
81 vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec; 82 vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec;
82 vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec; 83 vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;
83 vsyscall_gtod_data.wall_to_monotonic = *wtm; 84 vsyscall_gtod_data.wall_to_monotonic = *wtm;
84 vsyscall_gtod_data.wall_time_coarse = __current_kernel_time(); 85 vsyscall_gtod_data.wall_time_coarse = __current_kernel_time();
86
85 write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags); 87 write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
86} 88}
87 89
88/* RED-PEN may want to readd seq locking, but then the variable should be 90static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
89 * write-once. 91 const char *message)
90 */
91static __always_inline void do_get_tz(struct timezone * tz)
92{ 92{
93 *tz = VVAR(vsyscall_gtod_data).sys_tz; 93 static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);
94} 94 struct task_struct *tsk;
95 95
96static __always_inline int gettimeofday(struct timeval *tv, struct timezone *tz) 96 if (!show_unhandled_signals || !__ratelimit(&rs))
97{ 97 return;
98 int ret;
99 asm volatile("syscall"
100 : "=a" (ret)
101 : "0" (__NR_gettimeofday),"D" (tv),"S" (tz)
102 : __syscall_clobber );
103 return ret;
104}
105 98
106static __always_inline long time_syscall(long *t) 99 tsk = current;
107{
108 long secs;
109 asm volatile("syscall"
110 : "=a" (secs)
111 : "0" (__NR_time),"D" (t) : __syscall_clobber);
112 return secs;
113}
114 100
115static __always_inline void do_vgettimeofday(struct timeval * tv) 101 printk("%s%s[%d] %s ip:%lx cs:%lx sp:%lx ax:%lx si:%lx di:%lx\n",
116{ 102 level, tsk->comm, task_pid_nr(tsk),
117 cycle_t now, base, mask, cycle_delta; 103 message, regs->ip - 2, regs->cs,
118 unsigned seq; 104 regs->sp, regs->ax, regs->si, regs->di);
119 unsigned long mult, shift, nsec;
120 cycle_t (*vread)(void);
121 do {
122 seq = read_seqbegin(&VVAR(vsyscall_gtod_data).lock);
123
124 vread = VVAR(vsyscall_gtod_data).clock.vread;
125 if (unlikely(!VVAR(vsyscall_gtod_data).sysctl_enabled ||
126 !vread)) {
127 gettimeofday(tv,NULL);
128 return;
129 }
130
131 now = vread();
132 base = VVAR(vsyscall_gtod_data).clock.cycle_last;
133 mask = VVAR(vsyscall_gtod_data).clock.mask;
134 mult = VVAR(vsyscall_gtod_data).clock.mult;
135 shift = VVAR(vsyscall_gtod_data).clock.shift;
136
137 tv->tv_sec = VVAR(vsyscall_gtod_data).wall_time_sec;
138 nsec = VVAR(vsyscall_gtod_data).wall_time_nsec;
139 } while (read_seqretry(&VVAR(vsyscall_gtod_data).lock, seq));
140
141 /* calculate interval: */
142 cycle_delta = (now - base) & mask;
143 /* convert to nsecs: */
144 nsec += (cycle_delta * mult) >> shift;
145
146 while (nsec >= NSEC_PER_SEC) {
147 tv->tv_sec += 1;
148 nsec -= NSEC_PER_SEC;
149 }
150 tv->tv_usec = nsec / NSEC_PER_USEC;
151} 105}
152 106
153int __vsyscall(0) vgettimeofday(struct timeval * tv, struct timezone * tz) 107static int addr_to_vsyscall_nr(unsigned long addr)
154{ 108{
155 if (tv) 109 int nr;
156 do_vgettimeofday(tv);
157 if (tz)
158 do_get_tz(tz);
159 return 0;
160}
161 110
162/* This will break when the xtime seconds get inaccurate, but that is 111 if ((addr & ~0xC00UL) != VSYSCALL_START)
163 * unlikely */ 112 return -EINVAL;
164time_t __vsyscall(1) vtime(time_t *t)
165{
166 unsigned seq;
167 time_t result;
168 if (unlikely(!VVAR(vsyscall_gtod_data).sysctl_enabled))
169 return time_syscall(t);
170 113
171 do { 114 nr = (addr & 0xC00UL) >> 10;
172 seq = read_seqbegin(&VVAR(vsyscall_gtod_data).lock); 115 if (nr >= 3)
116 return -EINVAL;
173 117
174 result = VVAR(vsyscall_gtod_data).wall_time_sec; 118 return nr;
119}
175 120
176 } while (read_seqretry(&VVAR(vsyscall_gtod_data).lock, seq)); 121void dotraplinkage do_emulate_vsyscall(struct pt_regs *regs, long error_code)
122{
123 struct task_struct *tsk;
124 unsigned long caller;
125 int vsyscall_nr;
126 long ret;
127
128 local_irq_enable();
129
130 /*
131 * Real 64-bit user mode code has cs == __USER_CS. Anything else
132 * is bogus.
133 */
134 if (regs->cs != __USER_CS) {
135 /*
136 * If we trapped from kernel mode, we might as well OOPS now
137 * instead of returning to some random address and OOPSing
138 * then.
139 */
140 BUG_ON(!user_mode(regs));
141
142 /* Compat mode and non-compat 32-bit CS should both segfault. */
143 warn_bad_vsyscall(KERN_WARNING, regs,
144 "illegal int 0xcc from 32-bit mode");
145 goto sigsegv;
146 }
177 147
178 if (t) 148 /*
179 *t = result; 149 * x86-ism here: regs->ip points to the instruction after the int 0xcc,
180 return result; 150 * and int 0xcc is two bytes long.
181} 151 */
152 vsyscall_nr = addr_to_vsyscall_nr(regs->ip - 2);
153 if (vsyscall_nr < 0) {
154 warn_bad_vsyscall(KERN_WARNING, regs,
155 "illegal int 0xcc (exploit attempt?)");
156 goto sigsegv;
157 }
182 158
183/* Fast way to get current CPU and node. 159 if (get_user(caller, (unsigned long __user *)regs->sp) != 0) {
184 This helps to do per node and per CPU caches in user space. 160 warn_bad_vsyscall(KERN_WARNING, regs, "int 0xcc with bad stack (exploit attempt?)");
185 The result is not guaranteed without CPU affinity, but usually 161 goto sigsegv;
186 works out because the scheduler tries to keep a thread on the same 162 }
187 CPU.
188 163
189 tcache must point to a two element sized long array. 164 tsk = current;
190 All arguments can be NULL. */ 165 if (seccomp_mode(&tsk->seccomp))
191long __vsyscall(2) 166 do_exit(SIGKILL);
192vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache) 167
193{ 168 switch (vsyscall_nr) {
194 unsigned int p; 169 case 0:
195 unsigned long j = 0; 170 ret = sys_gettimeofday(
196 171 (struct timeval __user *)regs->di,
197 /* Fast cache - only recompute value once per jiffies and avoid 172 (struct timezone __user *)regs->si);
198 relatively costly rdtscp/cpuid otherwise. 173 break;
199 This works because the scheduler usually keeps the process 174
200 on the same CPU and this syscall doesn't guarantee its 175 case 1:
201 results anyways. 176 ret = sys_time((time_t __user *)regs->di);
202 We do this here because otherwise user space would do it on 177 break;
203 its own in a likely inferior way (no access to jiffies). 178
204 If you don't like it pass NULL. */ 179 case 2:
205 if (tcache && tcache->blob[0] == (j = VVAR(jiffies))) { 180 ret = sys_getcpu((unsigned __user *)regs->di,
206 p = tcache->blob[1]; 181 (unsigned __user *)regs->si,
207 } else if (VVAR(vgetcpu_mode) == VGETCPU_RDTSCP) { 182 0);
208 /* Load per CPU data from RDTSCP */ 183 break;
209 native_read_tscp(&p);
210 } else {
211 /* Load per CPU data from GDT */
212 asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
213 } 184 }
214 if (tcache) { 185
215 tcache->blob[0] = j; 186 if (ret == -EFAULT) {
216 tcache->blob[1] = p; 187 /*
188 * Bad news -- userspace fed a bad pointer to a vsyscall.
189 *
190 * With a real vsyscall, that would have caused SIGSEGV.
191 * To make writing reliable exploits using the emulated
192 * vsyscalls harder, generate SIGSEGV here as well.
193 */
194 warn_bad_vsyscall(KERN_INFO, regs,
195 "vsyscall fault (exploit attempt?)");
196 goto sigsegv;
217 } 197 }
218 if (cpu)
219 *cpu = p & 0xfff;
220 if (node)
221 *node = p >> 12;
222 return 0;
223}
224 198
225static long __vsyscall(3) venosys_1(void) 199 regs->ax = ret;
226{
227 return -ENOSYS;
228}
229 200
230#ifdef CONFIG_SYSCTL 201 /* Emulate a ret instruction. */
231static ctl_table kernel_table2[] = { 202 regs->ip = caller;
232 { .procname = "vsyscall64", 203 regs->sp += 8;
233 .data = &vsyscall_gtod_data.sysctl_enabled, .maxlen = sizeof(int),
234 .mode = 0644,
235 .proc_handler = proc_dointvec },
236 {}
237};
238 204
239static ctl_table kernel_root_table2[] = { 205 local_irq_disable();
240 { .procname = "kernel", .mode = 0555, 206 return;
241 .child = kernel_table2 }, 207
242 {} 208sigsegv:
243}; 209 regs->ip -= 2; /* The faulting instruction should be the int 0xcc. */
244#endif 210 force_sig(SIGSEGV, current);
211 local_irq_disable();
212}
245 213
246/* Assume __initcall executes before all user space. Hopefully kmod 214/*
247 doesn't violate that. We'll find out if it does. */ 215 * Assume __initcall executes before all user space. Hopefully kmod
216 * doesn't violate that. We'll find out if it does.
217 */
248static void __cpuinit vsyscall_set_cpu(int cpu) 218static void __cpuinit vsyscall_set_cpu(int cpu)
249{ 219{
250 unsigned long d; 220 unsigned long d;
@@ -255,13 +225,15 @@ static void __cpuinit vsyscall_set_cpu(int cpu)
255 if (cpu_has(&cpu_data(cpu), X86_FEATURE_RDTSCP)) 225 if (cpu_has(&cpu_data(cpu), X86_FEATURE_RDTSCP))
256 write_rdtscp_aux((node << 12) | cpu); 226 write_rdtscp_aux((node << 12) | cpu);
257 227
258 /* Store cpu number in limit so that it can be loaded quickly 228 /*
259 in user space in vgetcpu. 229 * Store cpu number in limit so that it can be loaded quickly
260 12 bits for the CPU and 8 bits for the node. */ 230 * in user space in vgetcpu. (12 bits for the CPU and 8 bits for the node)
231 */
261 d = 0x0f40000000000ULL; 232 d = 0x0f40000000000ULL;
262 d |= cpu; 233 d |= cpu;
263 d |= (node & 0xf) << 12; 234 d |= (node & 0xf) << 12;
264 d |= (node >> 4) << 48; 235 d |= (node >> 4) << 48;
236
265 write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S); 237 write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
266} 238}
267 239
@@ -275,8 +247,10 @@ static int __cpuinit
275cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg) 247cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
276{ 248{
277 long cpu = (long)arg; 249 long cpu = (long)arg;
250
278 if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) 251 if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
279 smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 1); 252 smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 1);
253
280 return NOTIFY_DONE; 254 return NOTIFY_DONE;
281} 255}
282 256
@@ -284,25 +258,23 @@ void __init map_vsyscall(void)
284{ 258{
285 extern char __vsyscall_0; 259 extern char __vsyscall_0;
286 unsigned long physaddr_page0 = __pa_symbol(&__vsyscall_0); 260 unsigned long physaddr_page0 = __pa_symbol(&__vsyscall_0);
261 extern char __vvar_page;
262 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
287 263
288 /* Note that VSYSCALL_MAPPED_PAGES must agree with the code below. */ 264 /* Note that VSYSCALL_MAPPED_PAGES must agree with the code below. */
289 __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL); 265 __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL);
266 __set_fixmap(VVAR_PAGE, physaddr_vvar_page, PAGE_KERNEL_VVAR);
267 BUILD_BUG_ON((unsigned long)__fix_to_virt(VVAR_PAGE) != (unsigned long)VVAR_ADDRESS);
290} 268}
291 269
292static int __init vsyscall_init(void) 270static int __init vsyscall_init(void)
293{ 271{
294 BUG_ON(((unsigned long) &vgettimeofday != 272 BUG_ON(VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE));
295 VSYSCALL_ADDR(__NR_vgettimeofday))); 273
296 BUG_ON((unsigned long) &vtime != VSYSCALL_ADDR(__NR_vtime));
297 BUG_ON((VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE)));
298 BUG_ON((unsigned long) &vgetcpu != VSYSCALL_ADDR(__NR_vgetcpu));
299#ifdef CONFIG_SYSCTL
300 register_sysctl_table(kernel_root_table2);
301#endif
302 on_each_cpu(cpu_vsyscall_init, NULL, 1); 274 on_each_cpu(cpu_vsyscall_init, NULL, 1);
303 /* notifier priority > KVM */ 275 /* notifier priority > KVM */
304 hotcpu_notifier(cpu_vsyscall_notifier, 30); 276 hotcpu_notifier(cpu_vsyscall_notifier, 30);
277
305 return 0; 278 return 0;
306} 279}
307
308__initcall(vsyscall_init); 280__initcall(vsyscall_init);
diff --git a/arch/x86/kernel/vsyscall_emu_64.S b/arch/x86/kernel/vsyscall_emu_64.S
new file mode 100644
index 000000000000..ffa845eae5ca
--- /dev/null
+++ b/arch/x86/kernel/vsyscall_emu_64.S
@@ -0,0 +1,27 @@
1/*
2 * vsyscall_emu_64.S: Vsyscall emulation page
3 *
4 * Copyright (c) 2011 Andy Lutomirski
5 *
6 * Subject to the GNU General Public License, version 2
7 */
8
9#include <linux/linkage.h>
10#include <asm/irq_vectors.h>
11
12/* The unused parts of the page are filled with 0xcc by the linker script. */
13
14.section .vsyscall_0, "a"
15ENTRY(vsyscall_0)
16 int $VSYSCALL_EMU_VECTOR
17END(vsyscall_0)
18
19.section .vsyscall_1, "a"
20ENTRY(vsyscall_1)
21 int $VSYSCALL_EMU_VECTOR
22END(vsyscall_1)
23
24.section .vsyscall_2, "a"
25ENTRY(vsyscall_2)
26 int $VSYSCALL_EMU_VECTOR
27END(vsyscall_2)
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 99c3f0589faa..988724b236b6 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -77,6 +77,5 @@ config KVM_MMU_AUDIT
77# the virtualization menu. 77# the virtualization menu.
78source drivers/vhost/Kconfig 78source drivers/vhost/Kconfig
79source drivers/lguest/Kconfig 79source drivers/lguest/Kconfig
80source drivers/virtio/Kconfig
81 80
82endif # VIRTUALIZATION 81endif # VIRTUALIZATION
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index db832fd65ecb..13ee258442ae 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -71,7 +71,8 @@
71#include <asm/stackprotector.h> 71#include <asm/stackprotector.h>
72#include <asm/reboot.h> /* for struct machine_ops */ 72#include <asm/reboot.h> /* for struct machine_ops */
73 73
74/*G:010 Welcome to the Guest! 74/*G:010
75 * Welcome to the Guest!
75 * 76 *
76 * The Guest in our tale is a simple creature: identical to the Host but 77 * The Guest in our tale is a simple creature: identical to the Host but
77 * behaving in simplified but equivalent ways. In particular, the Guest is the 78 * behaving in simplified but equivalent ways. In particular, the Guest is the
@@ -190,15 +191,23 @@ static void lazy_hcall4(unsigned long call,
190#endif 191#endif
191 192
192/*G:036 193/*G:036
193 * When lazy mode is turned off reset the per-cpu lazy mode variable and then 194 * When lazy mode is turned off, we issue the do-nothing hypercall to
194 * issue the do-nothing hypercall to flush any stored calls. 195 * flush any stored calls, and call the generic helper to reset the
195:*/ 196 * per-cpu lazy mode variable.
197 */
196static void lguest_leave_lazy_mmu_mode(void) 198static void lguest_leave_lazy_mmu_mode(void)
197{ 199{
198 hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0, 0); 200 hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0, 0);
199 paravirt_leave_lazy_mmu(); 201 paravirt_leave_lazy_mmu();
200} 202}
201 203
204/*
205 * We also catch the end of context switch; we enter lazy mode for much of
206 * that too, so again we need to flush here.
207 *
208 * (Technically, this is lazy CPU mode, and normally we're in lazy MMU
209 * mode, but unlike Xen, lguest doesn't care about the difference).
210 */
202static void lguest_end_context_switch(struct task_struct *next) 211static void lguest_end_context_switch(struct task_struct *next)
203{ 212{
204 hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0, 0); 213 hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0, 0);
@@ -391,7 +400,7 @@ static void lguest_load_tr_desc(void)
391 * giant ball of hair. Its entry in the current Intel manual runs to 28 pages. 400 * giant ball of hair. Its entry in the current Intel manual runs to 28 pages.
392 * 401 *
393 * This instruction even it has its own Wikipedia entry. The Wikipedia entry 402 * This instruction even it has its own Wikipedia entry. The Wikipedia entry
394 * has been translated into 5 languages. I am not making this up! 403 * has been translated into 6 languages. I am not making this up!
395 * 404 *
396 * We could get funky here and identify ourselves as "GenuineLguest", but 405 * We could get funky here and identify ourselves as "GenuineLguest", but
397 * instead we just use the real "cpuid" instruction. Then I pretty much turned 406 * instead we just use the real "cpuid" instruction. Then I pretty much turned
@@ -458,7 +467,7 @@ static void lguest_cpuid(unsigned int *ax, unsigned int *bx,
458 /* 467 /*
459 * PAE systems can mark pages as non-executable. Linux calls this the 468 * PAE systems can mark pages as non-executable. Linux calls this the
460 * NX bit. Intel calls it XD (eXecute Disable), AMD EVP (Enhanced 469 * NX bit. Intel calls it XD (eXecute Disable), AMD EVP (Enhanced
461 * Virus Protection). We just switch turn if off here, since we don't 470 * Virus Protection). We just switch it off here, since we don't
462 * support it. 471 * support it.
463 */ 472 */
464 case 0x80000001: 473 case 0x80000001:
@@ -520,17 +529,16 @@ static unsigned long lguest_read_cr2(void)
520 529
521/* See lguest_set_pte() below. */ 530/* See lguest_set_pte() below. */
522static bool cr3_changed = false; 531static bool cr3_changed = false;
532static unsigned long current_cr3;
523 533
524/* 534/*
525 * cr3 is the current toplevel pagetable page: the principle is the same as 535 * cr3 is the current toplevel pagetable page: the principle is the same as
526 * cr0. Keep a local copy, and tell the Host when it changes. The only 536 * cr0. Keep a local copy, and tell the Host when it changes.
527 * difference is that our local copy is in lguest_data because the Host needs
528 * to set it upon our initial hypercall.
529 */ 537 */
530static void lguest_write_cr3(unsigned long cr3) 538static void lguest_write_cr3(unsigned long cr3)
531{ 539{
532 lguest_data.pgdir = cr3;
533 lazy_hcall1(LHCALL_NEW_PGTABLE, cr3); 540 lazy_hcall1(LHCALL_NEW_PGTABLE, cr3);
541 current_cr3 = cr3;
534 542
535 /* These two page tables are simple, linear, and used during boot */ 543 /* These two page tables are simple, linear, and used during boot */
536 if (cr3 != __pa(swapper_pg_dir) && cr3 != __pa(initial_page_table)) 544 if (cr3 != __pa(swapper_pg_dir) && cr3 != __pa(initial_page_table))
@@ -539,7 +547,7 @@ static void lguest_write_cr3(unsigned long cr3)
539 547
540static unsigned long lguest_read_cr3(void) 548static unsigned long lguest_read_cr3(void)
541{ 549{
542 return lguest_data.pgdir; 550 return current_cr3;
543} 551}
544 552
545/* cr4 is used to enable and disable PGE, but we don't care. */ 553/* cr4 is used to enable and disable PGE, but we don't care. */
@@ -641,7 +649,7 @@ static void lguest_write_cr4(unsigned long val)
641 649
642/* 650/*
643 * The Guest calls this after it has set a second-level entry (pte), ie. to map 651 * The Guest calls this after it has set a second-level entry (pte), ie. to map
644 * a page into a process' address space. Wetell the Host the toplevel and 652 * a page into a process' address space. We tell the Host the toplevel and
645 * address this corresponds to. The Guest uses one pagetable per process, so 653 * address this corresponds to. The Guest uses one pagetable per process, so
646 * we need to tell the Host which one we're changing (mm->pgd). 654 * we need to tell the Host which one we're changing (mm->pgd).
647 */ 655 */
@@ -758,7 +766,7 @@ static void lguest_pmd_clear(pmd_t *pmdp)
758static void lguest_flush_tlb_single(unsigned long addr) 766static void lguest_flush_tlb_single(unsigned long addr)
759{ 767{
760 /* Simply set it to zero: if it was not, it will fault back in. */ 768 /* Simply set it to zero: if it was not, it will fault back in. */
761 lazy_hcall3(LHCALL_SET_PTE, lguest_data.pgdir, addr, 0); 769 lazy_hcall3(LHCALL_SET_PTE, current_cr3, addr, 0);
762} 770}
763 771
764/* 772/*
@@ -1140,7 +1148,7 @@ static struct notifier_block paniced = {
1140static __init char *lguest_memory_setup(void) 1148static __init char *lguest_memory_setup(void)
1141{ 1149{
1142 /* 1150 /*
1143 *The Linux bootloader header contains an "e820" memory map: the 1151 * The Linux bootloader header contains an "e820" memory map: the
1144 * Launcher populated the first entry with our memory limit. 1152 * Launcher populated the first entry with our memory limit.
1145 */ 1153 */
1146 e820_add_region(boot_params.e820_map[0].addr, 1154 e820_add_region(boot_params.e820_map[0].addr,
diff --git a/arch/x86/lguest/i386_head.S b/arch/x86/lguest/i386_head.S
index 4f420c2f2d55..6ddfe4fc23c3 100644
--- a/arch/x86/lguest/i386_head.S
+++ b/arch/x86/lguest/i386_head.S
@@ -6,18 +6,22 @@
6#include <asm/processor-flags.h> 6#include <asm/processor-flags.h>
7 7
8/*G:020 8/*G:020
9 * Our story starts with the kernel booting into startup_32 in 9
10 * arch/x86/kernel/head_32.S. It expects a boot header, which is created by 10 * Our story starts with the bzImage: booting starts at startup_32 in
11 * the bootloader (the Launcher in our case). 11 * arch/x86/boot/compressed/head_32.S. This merely uncompresses the real
12 * kernel in place and then jumps into it: startup_32 in
13 * arch/x86/kernel/head_32.S. Both routines expects a boot header in the %esi
14 * register, which is created by the bootloader (the Launcher in our case).
12 * 15 *
13 * The startup_32 function does very little: it clears the uninitialized global 16 * The startup_32 function does very little: it clears the uninitialized global
14 * C variables which we expect to be zero (ie. BSS) and then copies the boot 17 * C variables which we expect to be zero (ie. BSS) and then copies the boot
15 * header and kernel command line somewhere safe. Finally it checks the 18 * header and kernel command line somewhere safe, and populates some initial
16 * 'hardware_subarch' field. This was introduced in 2.6.24 for lguest and Xen: 19 * page tables. Finally it checks the 'hardware_subarch' field. This was
17 * if it's set to '1' (lguest's assigned number), then it calls us here. 20 * introduced in 2.6.24 for lguest and Xen: if it's set to '1' (lguest's
21 * assigned number), then it calls us here.
18 * 22 *
19 * WARNING: be very careful here! We're running at addresses equal to physical 23 * WARNING: be very careful here! We're running at addresses equal to physical
20 * addesses (around 0), not above PAGE_OFFSET as most code expectes 24 * addresses (around 0), not above PAGE_OFFSET as most code expects
21 * (eg. 0xC0000000). Jumps are relative, so they're OK, but we can't touch any 25 * (eg. 0xC0000000). Jumps are relative, so they're OK, but we can't touch any
22 * data without remembering to subtract __PAGE_OFFSET! 26 * data without remembering to subtract __PAGE_OFFSET!
23 * 27 *
@@ -27,13 +31,18 @@
27.section .init.text, "ax", @progbits 31.section .init.text, "ax", @progbits
28ENTRY(lguest_entry) 32ENTRY(lguest_entry)
29 /* 33 /*
30 * We make the "initialization" hypercall now to tell the Host about 34 * We make the "initialization" hypercall now to tell the Host where
31 * us, and also find out where it put our page tables. 35 * our lguest_data struct is.
32 */ 36 */
33 movl $LHCALL_LGUEST_INIT, %eax 37 movl $LHCALL_LGUEST_INIT, %eax
34 movl $lguest_data - __PAGE_OFFSET, %ebx 38 movl $lguest_data - __PAGE_OFFSET, %ebx
35 int $LGUEST_TRAP_ENTRY 39 int $LGUEST_TRAP_ENTRY
36 40
41 /* Now turn our pagetables on; setup by arch/x86/kernel/head_32.S. */
42 movl $LHCALL_NEW_PGTABLE, %eax
43 movl $(initial_page_table - __PAGE_OFFSET), %ebx
44 int $LGUEST_TRAP_ENTRY
45
37 /* Set up the initial stack so we can run C code. */ 46 /* Set up the initial stack so we can run C code. */
38 movl $(init_thread_union+THREAD_SIZE),%esp 47 movl $(init_thread_union+THREAD_SIZE),%esp
39 48
@@ -96,12 +105,8 @@ send_interrupts:
96 */ 105 */
97 pushl %eax 106 pushl %eax
98 movl $LHCALL_SEND_INTERRUPTS, %eax 107 movl $LHCALL_SEND_INTERRUPTS, %eax
99 /* 108 /* This is the actual hypercall trap. */
100 * This is a vmcall instruction (same thing that KVM uses). Older 109 int $LGUEST_TRAP_ENTRY
101 * assembler versions might not know the "vmcall" instruction, so we
102 * create one manually here.
103 */
104 .byte 0x0f,0x01,0xc1 /* KVM_HYPERCALL */
105 /* Put eax back the way we found it. */ 110 /* Put eax back the way we found it. */
106 popl %eax 111 popl %eax
107 ret 112 ret
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index f2479f19ddde..b00f6785da74 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -18,8 +18,10 @@ obj-$(CONFIG_SMP) += msr-smp.o cache-smp.o
18 18
19lib-y := delay.o 19lib-y := delay.o
20lib-y += thunk_$(BITS).o 20lib-y += thunk_$(BITS).o
21lib-y += usercopy_$(BITS).o getuser.o putuser.o 21lib-y += usercopy_$(BITS).o usercopy.o getuser.o putuser.o
22lib-y += memcpy_$(BITS).o 22lib-y += memcpy_$(BITS).o
23lib-$(CONFIG_SMP) += rwlock.o
24lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
23lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o 25lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o
24 26
25obj-y += msr.o msr-reg.o msr-reg-export.o 27obj-y += msr.o msr-reg.o msr-reg-export.o
@@ -29,7 +31,7 @@ ifeq ($(CONFIG_X86_32),y)
29 lib-y += atomic64_cx8_32.o 31 lib-y += atomic64_cx8_32.o
30 lib-y += checksum_32.o 32 lib-y += checksum_32.o
31 lib-y += strstr_32.o 33 lib-y += strstr_32.o
32 lib-y += semaphore_32.o string_32.o 34 lib-y += string_32.o
33 lib-y += cmpxchg.o 35 lib-y += cmpxchg.o
34ifneq ($(CONFIG_X86_CMPXCHG64),y) 36ifneq ($(CONFIG_X86_CMPXCHG64),y)
35 lib-y += cmpxchg8b_emu.o atomic64_386_32.o 37 lib-y += cmpxchg8b_emu.o atomic64_386_32.o
@@ -40,7 +42,6 @@ else
40 lib-y += csum-partial_64.o csum-copy_64.o csum-wrappers_64.o 42 lib-y += csum-partial_64.o csum-copy_64.o csum-wrappers_64.o
41 lib-y += thunk_64.o clear_page_64.o copy_page_64.o 43 lib-y += thunk_64.o clear_page_64.o copy_page_64.o
42 lib-y += memmove_64.o memset_64.o 44 lib-y += memmove_64.o memset_64.o
43 lib-y += copy_user_64.o rwlock_64.o copy_user_nocache_64.o 45 lib-y += copy_user_64.o copy_user_nocache_64.o
44 lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem_64.o
45 lib-y += cmpxchg16b_emu.o 46 lib-y += cmpxchg16b_emu.o
46endif 47endif
diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
index 6fec2d1cebe1..01c805ba5359 100644
--- a/arch/x86/lib/copy_page_64.S
+++ b/arch/x86/lib/copy_page_64.S
@@ -2,6 +2,7 @@
2 2
3#include <linux/linkage.h> 3#include <linux/linkage.h>
4#include <asm/dwarf2.h> 4#include <asm/dwarf2.h>
5#include <asm/alternative-asm.h>
5 6
6 ALIGN 7 ALIGN
7copy_page_c: 8copy_page_c:
@@ -110,10 +111,6 @@ ENDPROC(copy_page)
1102: 1112:
111 .previous 112 .previous
112 .section .altinstructions,"a" 113 .section .altinstructions,"a"
113 .align 8 114 altinstruction_entry copy_page, 1b, X86_FEATURE_REP_GOOD, \
114 .quad copy_page 115 .Lcopy_page_end-copy_page, 2b-1b
115 .quad 1b
116 .word X86_FEATURE_REP_GOOD
117 .byte .Lcopy_page_end - copy_page
118 .byte 2b - 1b
119 .previous 116 .previous
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index d0ec9c2936d7..ee164610ec46 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -9,6 +9,7 @@
9#include <linux/linkage.h> 9#include <linux/linkage.h>
10#include <asm/dwarf2.h> 10#include <asm/dwarf2.h>
11#include <asm/cpufeature.h> 11#include <asm/cpufeature.h>
12#include <asm/alternative-asm.h>
12 13
13#undef memmove 14#undef memmove
14 15
@@ -214,11 +215,9 @@ ENTRY(memmove)
214 .previous 215 .previous
215 216
216 .section .altinstructions,"a" 217 .section .altinstructions,"a"
217 .align 8 218 altinstruction_entry .Lmemmove_begin_forward, \
218 .quad .Lmemmove_begin_forward 219 .Lmemmove_begin_forward_efs,X86_FEATURE_ERMS, \
219 .quad .Lmemmove_begin_forward_efs 220 .Lmemmove_end_forward-.Lmemmove_begin_forward, \
220 .word X86_FEATURE_ERMS 221 .Lmemmove_end_forward_efs-.Lmemmove_begin_forward_efs
221 .byte .Lmemmove_end_forward-.Lmemmove_begin_forward
222 .byte .Lmemmove_end_forward_efs-.Lmemmove_begin_forward_efs
223 .previous 222 .previous
224ENDPROC(memmove) 223ENDPROC(memmove)
diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
new file mode 100644
index 000000000000..1cad22139c88
--- /dev/null
+++ b/arch/x86/lib/rwlock.S
@@ -0,0 +1,44 @@
1/* Slow paths of read/write spinlocks. */
2
3#include <linux/linkage.h>
4#include <asm/alternative-asm.h>
5#include <asm/frame.h>
6#include <asm/rwlock.h>
7
8#ifdef CONFIG_X86_32
9# define __lock_ptr eax
10#else
11# define __lock_ptr rdi
12#endif
13
14ENTRY(__write_lock_failed)
15 CFI_STARTPROC
16 FRAME
170: LOCK_PREFIX
18 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
191: rep; nop
20 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
21 jne 1b
22 LOCK_PREFIX
23 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
24 jnz 0b
25 ENDFRAME
26 ret
27 CFI_ENDPROC
28END(__write_lock_failed)
29
30ENTRY(__read_lock_failed)
31 CFI_STARTPROC
32 FRAME
330: LOCK_PREFIX
34 READ_LOCK_SIZE(inc) (%__lock_ptr)
351: rep; nop
36 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
37 js 1b
38 LOCK_PREFIX
39 READ_LOCK_SIZE(dec) (%__lock_ptr)
40 js 0b
41 ENDFRAME
42 ret
43 CFI_ENDPROC
44END(__read_lock_failed)
diff --git a/arch/x86/lib/rwlock_64.S b/arch/x86/lib/rwlock_64.S
deleted file mode 100644
index 05ea55f71405..000000000000
--- a/arch/x86/lib/rwlock_64.S
+++ /dev/null
@@ -1,38 +0,0 @@
1/* Slow paths of read/write spinlocks. */
2
3#include <linux/linkage.h>
4#include <asm/rwlock.h>
5#include <asm/alternative-asm.h>
6#include <asm/dwarf2.h>
7
8/* rdi: pointer to rwlock_t */
9ENTRY(__write_lock_failed)
10 CFI_STARTPROC
11 LOCK_PREFIX
12 addl $RW_LOCK_BIAS,(%rdi)
131: rep
14 nop
15 cmpl $RW_LOCK_BIAS,(%rdi)
16 jne 1b
17 LOCK_PREFIX
18 subl $RW_LOCK_BIAS,(%rdi)
19 jnz __write_lock_failed
20 ret
21 CFI_ENDPROC
22END(__write_lock_failed)
23
24/* rdi: pointer to rwlock_t */
25ENTRY(__read_lock_failed)
26 CFI_STARTPROC
27 LOCK_PREFIX
28 incl (%rdi)
291: rep
30 nop
31 cmpl $1,(%rdi)
32 js 1b
33 LOCK_PREFIX
34 decl (%rdi)
35 js __read_lock_failed
36 ret
37 CFI_ENDPROC
38END(__read_lock_failed)
diff --git a/arch/x86/lib/rwsem_64.S b/arch/x86/lib/rwsem.S
index 67743977398b..5dff5f042468 100644
--- a/arch/x86/lib/rwsem_64.S
+++ b/arch/x86/lib/rwsem.S
@@ -1,4 +1,51 @@
1/* 1/*
2 * x86 semaphore implementation.
3 *
4 * (C) Copyright 1999 Linus Torvalds
5 *
6 * Portions Copyright 1999 Red Hat, Inc.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 *
13 * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org>
14 */
15
16#include <linux/linkage.h>
17#include <asm/alternative-asm.h>
18#include <asm/dwarf2.h>
19
20#define __ASM_HALF_REG(reg) __ASM_SEL(reg, e##reg)
21#define __ASM_HALF_SIZE(inst) __ASM_SEL(inst##w, inst##l)
22
23#ifdef CONFIG_X86_32
24
25/*
26 * The semaphore operations have a special calling sequence that
27 * allow us to do a simpler in-line version of them. These routines
28 * need to convert that sequence back into the C sequence when
29 * there is contention on the semaphore.
30 *
31 * %eax contains the semaphore pointer on entry. Save the C-clobbered
32 * registers (%eax, %edx and %ecx) except %eax whish is either a return
33 * value or just clobbered..
34 */
35
36#define save_common_regs \
37 pushl_cfi %ecx; CFI_REL_OFFSET ecx, 0
38
39#define restore_common_regs \
40 popl_cfi %ecx; CFI_RESTORE ecx
41
42 /* Avoid uglifying the argument copying x86-64 needs to do. */
43 .macro movq src, dst
44 .endm
45
46#else
47
48/*
2 * x86-64 rwsem wrappers 49 * x86-64 rwsem wrappers
3 * 50 *
4 * This interfaces the inline asm code to the slow-path 51 * This interfaces the inline asm code to the slow-path
@@ -16,12 +63,6 @@
16 * but %rdi, %rsi, %rcx, %r8-r11 always need saving. 63 * but %rdi, %rsi, %rcx, %r8-r11 always need saving.
17 */ 64 */
18 65
19#include <linux/linkage.h>
20#include <asm/rwlock.h>
21#include <asm/alternative-asm.h>
22#include <asm/frame.h>
23#include <asm/dwarf2.h>
24
25#define save_common_regs \ 66#define save_common_regs \
26 pushq_cfi %rdi; CFI_REL_OFFSET rdi, 0; \ 67 pushq_cfi %rdi; CFI_REL_OFFSET rdi, 0; \
27 pushq_cfi %rsi; CFI_REL_OFFSET rsi, 0; \ 68 pushq_cfi %rsi; CFI_REL_OFFSET rsi, 0; \
@@ -40,16 +81,18 @@
40 popq_cfi %rsi; CFI_RESTORE rsi; \ 81 popq_cfi %rsi; CFI_RESTORE rsi; \
41 popq_cfi %rdi; CFI_RESTORE rdi 82 popq_cfi %rdi; CFI_RESTORE rdi
42 83
84#endif
85
43/* Fix up special calling conventions */ 86/* Fix up special calling conventions */
44ENTRY(call_rwsem_down_read_failed) 87ENTRY(call_rwsem_down_read_failed)
45 CFI_STARTPROC 88 CFI_STARTPROC
46 save_common_regs 89 save_common_regs
47 pushq_cfi %rdx 90 __ASM_SIZE(push,_cfi) %__ASM_REG(dx)
48 CFI_REL_OFFSET rdx, 0 91 CFI_REL_OFFSET __ASM_REG(dx), 0
49 movq %rax,%rdi 92 movq %rax,%rdi
50 call rwsem_down_read_failed 93 call rwsem_down_read_failed
51 popq_cfi %rdx 94 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
52 CFI_RESTORE rdx 95 CFI_RESTORE __ASM_REG(dx)
53 restore_common_regs 96 restore_common_regs
54 ret 97 ret
55 CFI_ENDPROC 98 CFI_ENDPROC
@@ -67,7 +110,8 @@ ENDPROC(call_rwsem_down_write_failed)
67 110
68ENTRY(call_rwsem_wake) 111ENTRY(call_rwsem_wake)
69 CFI_STARTPROC 112 CFI_STARTPROC
70 decl %edx /* do nothing if still outstanding active readers */ 113 /* do nothing if still outstanding active readers */
114 __ASM_HALF_SIZE(dec) %__ASM_HALF_REG(dx)
71 jnz 1f 115 jnz 1f
72 save_common_regs 116 save_common_regs
73 movq %rax,%rdi 117 movq %rax,%rdi
@@ -77,16 +121,15 @@ ENTRY(call_rwsem_wake)
77 CFI_ENDPROC 121 CFI_ENDPROC
78ENDPROC(call_rwsem_wake) 122ENDPROC(call_rwsem_wake)
79 123
80/* Fix up special calling conventions */
81ENTRY(call_rwsem_downgrade_wake) 124ENTRY(call_rwsem_downgrade_wake)
82 CFI_STARTPROC 125 CFI_STARTPROC
83 save_common_regs 126 save_common_regs
84 pushq_cfi %rdx 127 __ASM_SIZE(push,_cfi) %__ASM_REG(dx)
85 CFI_REL_OFFSET rdx, 0 128 CFI_REL_OFFSET __ASM_REG(dx), 0
86 movq %rax,%rdi 129 movq %rax,%rdi
87 call rwsem_downgrade_wake 130 call rwsem_downgrade_wake
88 popq_cfi %rdx 131 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
89 CFI_RESTORE rdx 132 CFI_RESTORE __ASM_REG(dx)
90 restore_common_regs 133 restore_common_regs
91 ret 134 ret
92 CFI_ENDPROC 135 CFI_ENDPROC
diff --git a/arch/x86/lib/semaphore_32.S b/arch/x86/lib/semaphore_32.S
deleted file mode 100644
index 06691daa4108..000000000000
--- a/arch/x86/lib/semaphore_32.S
+++ /dev/null
@@ -1,124 +0,0 @@
1/*
2 * i386 semaphore implementation.
3 *
4 * (C) Copyright 1999 Linus Torvalds
5 *
6 * Portions Copyright 1999 Red Hat, Inc.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 *
13 * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org>
14 */
15
16#include <linux/linkage.h>
17#include <asm/rwlock.h>
18#include <asm/alternative-asm.h>
19#include <asm/frame.h>
20#include <asm/dwarf2.h>
21
22/*
23 * The semaphore operations have a special calling sequence that
24 * allow us to do a simpler in-line version of them. These routines
25 * need to convert that sequence back into the C sequence when
26 * there is contention on the semaphore.
27 *
28 * %eax contains the semaphore pointer on entry. Save the C-clobbered
29 * registers (%eax, %edx and %ecx) except %eax whish is either a return
30 * value or just clobbered..
31 */
32 .section .sched.text, "ax"
33
34/*
35 * rw spinlock fallbacks
36 */
37#ifdef CONFIG_SMP
38ENTRY(__write_lock_failed)
39 CFI_STARTPROC
40 FRAME
412: LOCK_PREFIX
42 addl $ RW_LOCK_BIAS,(%eax)
431: rep; nop
44 cmpl $ RW_LOCK_BIAS,(%eax)
45 jne 1b
46 LOCK_PREFIX
47 subl $ RW_LOCK_BIAS,(%eax)
48 jnz 2b
49 ENDFRAME
50 ret
51 CFI_ENDPROC
52 ENDPROC(__write_lock_failed)
53
54ENTRY(__read_lock_failed)
55 CFI_STARTPROC
56 FRAME
572: LOCK_PREFIX
58 incl (%eax)
591: rep; nop
60 cmpl $1,(%eax)
61 js 1b
62 LOCK_PREFIX
63 decl (%eax)
64 js 2b
65 ENDFRAME
66 ret
67 CFI_ENDPROC
68 ENDPROC(__read_lock_failed)
69
70#endif
71
72#ifdef CONFIG_RWSEM_XCHGADD_ALGORITHM
73
74/* Fix up special calling conventions */
75ENTRY(call_rwsem_down_read_failed)
76 CFI_STARTPROC
77 pushl_cfi %ecx
78 CFI_REL_OFFSET ecx,0
79 pushl_cfi %edx
80 CFI_REL_OFFSET edx,0
81 call rwsem_down_read_failed
82 popl_cfi %edx
83 popl_cfi %ecx
84 ret
85 CFI_ENDPROC
86 ENDPROC(call_rwsem_down_read_failed)
87
88ENTRY(call_rwsem_down_write_failed)
89 CFI_STARTPROC
90 pushl_cfi %ecx
91 CFI_REL_OFFSET ecx,0
92 calll rwsem_down_write_failed
93 popl_cfi %ecx
94 ret
95 CFI_ENDPROC
96 ENDPROC(call_rwsem_down_write_failed)
97
98ENTRY(call_rwsem_wake)
99 CFI_STARTPROC
100 decw %dx /* do nothing if still outstanding active readers */
101 jnz 1f
102 pushl_cfi %ecx
103 CFI_REL_OFFSET ecx,0
104 call rwsem_wake
105 popl_cfi %ecx
1061: ret
107 CFI_ENDPROC
108 ENDPROC(call_rwsem_wake)
109
110/* Fix up special calling conventions */
111ENTRY(call_rwsem_downgrade_wake)
112 CFI_STARTPROC
113 pushl_cfi %ecx
114 CFI_REL_OFFSET ecx,0
115 pushl_cfi %edx
116 CFI_REL_OFFSET edx,0
117 call rwsem_downgrade_wake
118 popl_cfi %edx
119 popl_cfi %ecx
120 ret
121 CFI_ENDPROC
122 ENDPROC(call_rwsem_downgrade_wake)
123
124#endif
diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
index 782b082c9ff7..a63efd6bb6a5 100644
--- a/arch/x86/lib/thunk_64.S
+++ b/arch/x86/lib/thunk_64.S
@@ -5,50 +5,41 @@
5 * Added trace_hardirqs callers - Copyright 2007 Steven Rostedt, Red Hat, Inc. 5 * Added trace_hardirqs callers - Copyright 2007 Steven Rostedt, Red Hat, Inc.
6 * Subject to the GNU public license, v.2. No warranty of any kind. 6 * Subject to the GNU public license, v.2. No warranty of any kind.
7 */ 7 */
8#include <linux/linkage.h>
9#include <asm/dwarf2.h>
10#include <asm/calling.h>
8 11
9 #include <linux/linkage.h> 12 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
10 #include <asm/dwarf2.h> 13 .macro THUNK name, func, put_ret_addr_in_rdi=0
11 #include <asm/calling.h>
12 #include <asm/rwlock.h>
13
14 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
15 .macro thunk name,func
16 .globl \name
17\name:
18 CFI_STARTPROC
19 SAVE_ARGS
20 call \func
21 jmp restore
22 CFI_ENDPROC
23 .endm
24
25#ifdef CONFIG_TRACE_IRQFLAGS
26 /* put return address in rdi (arg1) */
27 .macro thunk_ra name,func
28 .globl \name 14 .globl \name
29\name: 15\name:
30 CFI_STARTPROC 16 CFI_STARTPROC
17
18 /* this one pushes 9 elems, the next one would be %rIP */
31 SAVE_ARGS 19 SAVE_ARGS
32 /* SAVE_ARGS pushs 9 elements */ 20
33 /* the next element would be the rip */ 21 .if \put_ret_addr_in_rdi
34 movq 9*8(%rsp), %rdi 22 movq_cfi_restore 9*8, rdi
23 .endif
24
35 call \func 25 call \func
36 jmp restore 26 jmp restore
37 CFI_ENDPROC 27 CFI_ENDPROC
38 .endm 28 .endm
39 29
40 thunk_ra trace_hardirqs_on_thunk,trace_hardirqs_on_caller 30#ifdef CONFIG_TRACE_IRQFLAGS
41 thunk_ra trace_hardirqs_off_thunk,trace_hardirqs_off_caller 31 THUNK trace_hardirqs_on_thunk,trace_hardirqs_on_caller,1
32 THUNK trace_hardirqs_off_thunk,trace_hardirqs_off_caller,1
42#endif 33#endif
43 34
44#ifdef CONFIG_DEBUG_LOCK_ALLOC 35#ifdef CONFIG_DEBUG_LOCK_ALLOC
45 thunk lockdep_sys_exit_thunk,lockdep_sys_exit 36 THUNK lockdep_sys_exit_thunk,lockdep_sys_exit
46#endif 37#endif
47 38
48 /* SAVE_ARGS below is used only for the .cfi directives it contains. */ 39 /* SAVE_ARGS below is used only for the .cfi directives it contains. */
49 CFI_STARTPROC 40 CFI_STARTPROC
50 SAVE_ARGS 41 SAVE_ARGS
51restore: 42restore:
52 RESTORE_ARGS 43 RESTORE_ARGS
53 ret 44 ret
54 CFI_ENDPROC 45 CFI_ENDPROC
diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c
new file mode 100644
index 000000000000..97be9cb54483
--- /dev/null
+++ b/arch/x86/lib/usercopy.c
@@ -0,0 +1,43 @@
1/*
2 * User address space access functions.
3 *
4 * For licencing details see kernel-base/COPYING
5 */
6
7#include <linux/highmem.h>
8#include <linux/module.h>
9
10/*
11 * best effort, GUP based copy_from_user() that is NMI-safe
12 */
13unsigned long
14copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
15{
16 unsigned long offset, addr = (unsigned long)from;
17 unsigned long size, len = 0;
18 struct page *page;
19 void *map;
20 int ret;
21
22 do {
23 ret = __get_user_pages_fast(addr, 1, 0, &page);
24 if (!ret)
25 break;
26
27 offset = addr & (PAGE_SIZE - 1);
28 size = min(PAGE_SIZE - offset, n - len);
29
30 map = kmap_atomic(page);
31 memcpy(to, map+offset, size);
32 kunmap_atomic(map);
33 put_page(page);
34
35 len += size;
36 to += size;
37 addr += size;
38
39 } while (len < n);
40
41 return len;
42}
43EXPORT_SYMBOL_GPL(copy_from_user_nmi);
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 2dbf6bf4c7e5..4d09df054e39 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1059,7 +1059,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
1059 if (unlikely(error_code & PF_RSVD)) 1059 if (unlikely(error_code & PF_RSVD))
1060 pgtable_bad(regs, error_code, address); 1060 pgtable_bad(regs, error_code, address);
1061 1061
1062 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); 1062 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
1063 1063
1064 /* 1064 /*
1065 * If we're in an interrupt, have no user context or are running 1065 * If we're in an interrupt, have no user context or are running
@@ -1161,11 +1161,11 @@ good_area:
1161 if (flags & FAULT_FLAG_ALLOW_RETRY) { 1161 if (flags & FAULT_FLAG_ALLOW_RETRY) {
1162 if (fault & VM_FAULT_MAJOR) { 1162 if (fault & VM_FAULT_MAJOR) {
1163 tsk->maj_flt++; 1163 tsk->maj_flt++;
1164 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, 1164 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
1165 regs, address); 1165 regs, address);
1166 } else { 1166 } else {
1167 tsk->min_flt++; 1167 tsk->min_flt++;
1168 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, 1168 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
1169 regs, address); 1169 regs, address);
1170 } 1170 }
1171 if (fault & VM_FAULT_RETRY) { 1171 if (fault & VM_FAULT_RETRY) {
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index d865c4aeec55..bbaaa005bf0e 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -28,6 +28,7 @@
28#include <linux/poison.h> 28#include <linux/poison.h>
29#include <linux/dma-mapping.h> 29#include <linux/dma-mapping.h>
30#include <linux/module.h> 30#include <linux/module.h>
31#include <linux/memory.h>
31#include <linux/memory_hotplug.h> 32#include <linux/memory_hotplug.h>
32#include <linux/nmi.h> 33#include <linux/nmi.h>
33#include <linux/gfp.h> 34#include <linux/gfp.h>
@@ -895,8 +896,6 @@ const char *arch_vma_name(struct vm_area_struct *vma)
895} 896}
896 897
897#ifdef CONFIG_X86_UV 898#ifdef CONFIG_X86_UV
898#define MIN_MEMORY_BLOCK_SIZE (1 << SECTION_SIZE_BITS)
899
900unsigned long memory_block_size_bytes(void) 899unsigned long memory_block_size_bytes(void)
901{ 900{
902 if (is_uv_system()) { 901 if (is_uv_system()) {
diff --git a/arch/x86/mm/kmemcheck/error.c b/arch/x86/mm/kmemcheck/error.c
index 704a37cedddb..dab41876cdd5 100644
--- a/arch/x86/mm/kmemcheck/error.c
+++ b/arch/x86/mm/kmemcheck/error.c
@@ -185,7 +185,7 @@ void kmemcheck_error_save(enum kmemcheck_shadow state,
185 e->trace.entries = e->trace_entries; 185 e->trace.entries = e->trace_entries;
186 e->trace.max_entries = ARRAY_SIZE(e->trace_entries); 186 e->trace.max_entries = ARRAY_SIZE(e->trace_entries);
187 e->trace.skip = 0; 187 e->trace.skip = 0;
188 save_stack_trace_regs(&e->trace, regs); 188 save_stack_trace_regs(regs, &e->trace);
189 189
190 /* Round address down to nearest 16 bytes */ 190 /* Round address down to nearest 16 bytes */
191 shadow_copy = kmemcheck_shadow_lookup(address 191 shadow_copy = kmemcheck_shadow_lookup(address
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index f5510d889a22..fbeaaf416610 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -496,6 +496,7 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
496 496
497static int __init numa_register_memblks(struct numa_meminfo *mi) 497static int __init numa_register_memblks(struct numa_meminfo *mi)
498{ 498{
499 unsigned long uninitialized_var(pfn_align);
499 int i, nid; 500 int i, nid;
500 501
501 /* Account for nodes with cpus and no memory */ 502 /* Account for nodes with cpus and no memory */
@@ -511,6 +512,20 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
511 512
512 /* for out of order entries */ 513 /* for out of order entries */
513 sort_node_map(); 514 sort_node_map();
515
516 /*
517 * If sections array is gonna be used for pfn -> nid mapping, check
518 * whether its granularity is fine enough.
519 */
520#ifdef NODE_NOT_IN_PAGE_FLAGS
521 pfn_align = node_map_pfn_alignment();
522 if (pfn_align && pfn_align < PAGES_PER_SECTION) {
523 printk(KERN_WARNING "Node alignment %LuMB < min %LuMB, rejecting NUMA config\n",
524 PFN_PHYS(pfn_align) >> 20,
525 PFN_PHYS(PAGES_PER_SECTION) >> 20);
526 return -EINVAL;
527 }
528#endif
514 if (!numa_meminfo_cover_memory(mi)) 529 if (!numa_meminfo_cover_memory(mi))
515 return -EINVAL; 530 return -EINVAL;
516 531
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 849a975d3fa0..3adebe7e536a 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -41,7 +41,7 @@
41 * physnode_map[16-31] = 1; 41 * physnode_map[16-31] = 1;
42 * physnode_map[32- ] = -1; 42 * physnode_map[32- ] = -1;
43 */ 43 */
44s8 physnode_map[MAX_ELEMENTS] __read_mostly = { [0 ... (MAX_ELEMENTS - 1)] = -1}; 44s8 physnode_map[MAX_SECTIONS] __read_mostly = { [0 ... (MAX_SECTIONS - 1)] = -1};
45EXPORT_SYMBOL(physnode_map); 45EXPORT_SYMBOL(physnode_map);
46 46
47void memory_present(int nid, unsigned long start, unsigned long end) 47void memory_present(int nid, unsigned long start, unsigned long end)
@@ -52,8 +52,8 @@ void memory_present(int nid, unsigned long start, unsigned long end)
52 nid, start, end); 52 nid, start, end);
53 printk(KERN_DEBUG " Setting physnode_map array to node %d for pfns:\n", nid); 53 printk(KERN_DEBUG " Setting physnode_map array to node %d for pfns:\n", nid);
54 printk(KERN_DEBUG " "); 54 printk(KERN_DEBUG " ");
55 for (pfn = start; pfn < end; pfn += PAGES_PER_ELEMENT) { 55 for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
56 physnode_map[pfn / PAGES_PER_ELEMENT] = nid; 56 physnode_map[pfn / PAGES_PER_SECTION] = nid;
57 printk(KERN_CONT "%lx ", pfn); 57 printk(KERN_CONT "%lx ", pfn);
58 } 58 }
59 printk(KERN_CONT "\n"); 59 printk(KERN_CONT "\n");
diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
index e1d106909218..b0086567271c 100644
--- a/arch/x86/mm/pageattr-test.c
+++ b/arch/x86/mm/pageattr-test.c
@@ -123,12 +123,11 @@ static int pageattr_test(void)
123 if (print) 123 if (print)
124 printk(KERN_INFO "CPA self-test:\n"); 124 printk(KERN_INFO "CPA self-test:\n");
125 125
126 bm = vmalloc((max_pfn_mapped + 7) / 8); 126 bm = vzalloc((max_pfn_mapped + 7) / 8);
127 if (!bm) { 127 if (!bm) {
128 printk(KERN_ERR "CPA Cannot vmalloc bitmap\n"); 128 printk(KERN_ERR "CPA Cannot vmalloc bitmap\n");
129 return -ENOMEM; 129 return -ENOMEM;
130 } 130 }
131 memset(bm, 0, (max_pfn_mapped + 7) / 8);
132 131
133 failed += print_split(&sa); 132 failed += print_split(&sa);
134 srandom32(100); 133 srandom32(100);
diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
index a5b64ab4cd6e..bff89dfe3619 100644
--- a/arch/x86/oprofile/backtrace.c
+++ b/arch/x86/oprofile/backtrace.c
@@ -11,10 +11,11 @@
11#include <linux/oprofile.h> 11#include <linux/oprofile.h>
12#include <linux/sched.h> 12#include <linux/sched.h>
13#include <linux/mm.h> 13#include <linux/mm.h>
14#include <linux/compat.h>
15#include <linux/uaccess.h>
16
14#include <asm/ptrace.h> 17#include <asm/ptrace.h>
15#include <asm/uaccess.h>
16#include <asm/stacktrace.h> 18#include <asm/stacktrace.h>
17#include <linux/compat.h>
18 19
19static int backtrace_stack(void *data, char *name) 20static int backtrace_stack(void *data, char *name)
20{ 21{
@@ -40,13 +41,13 @@ static struct stacktrace_ops backtrace_ops = {
40static struct stack_frame_ia32 * 41static struct stack_frame_ia32 *
41dump_user_backtrace_32(struct stack_frame_ia32 *head) 42dump_user_backtrace_32(struct stack_frame_ia32 *head)
42{ 43{
44 /* Also check accessibility of one struct frame_head beyond: */
43 struct stack_frame_ia32 bufhead[2]; 45 struct stack_frame_ia32 bufhead[2];
44 struct stack_frame_ia32 *fp; 46 struct stack_frame_ia32 *fp;
47 unsigned long bytes;
45 48
46 /* Also check accessibility of one struct frame_head beyond */ 49 bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
47 if (!access_ok(VERIFY_READ, head, sizeof(bufhead))) 50 if (bytes != sizeof(bufhead))
48 return NULL;
49 if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
50 return NULL; 51 return NULL;
51 52
52 fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame); 53 fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
@@ -87,12 +88,12 @@ x86_backtrace_32(struct pt_regs * const regs, unsigned int depth)
87 88
88static struct stack_frame *dump_user_backtrace(struct stack_frame *head) 89static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
89{ 90{
91 /* Also check accessibility of one struct frame_head beyond: */
90 struct stack_frame bufhead[2]; 92 struct stack_frame bufhead[2];
93 unsigned long bytes;
91 94
92 /* Also check accessibility of one struct stack_frame beyond */ 95 bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
93 if (!access_ok(VERIFY_READ, head, sizeof(bufhead))) 96 if (bytes != sizeof(bufhead))
94 return NULL;
95 if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
96 return NULL; 97 return NULL;
97 98
98 oprofile_add_trace(bufhead[0].return_address); 99 oprofile_add_trace(bufhead[0].return_address);
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index cf9750004a08..68894fdc034b 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -112,8 +112,10 @@ static void nmi_cpu_start(void *dummy)
112static int nmi_start(void) 112static int nmi_start(void)
113{ 113{
114 get_online_cpus(); 114 get_online_cpus();
115 on_each_cpu(nmi_cpu_start, NULL, 1);
116 ctr_running = 1; 115 ctr_running = 1;
116 /* make ctr_running visible to the nmi handler: */
117 smp_mb();
118 on_each_cpu(nmi_cpu_start, NULL, 1);
117 put_online_cpus(); 119 put_online_cpus();
118 return 0; 120 return 0;
119} 121}
@@ -504,15 +506,18 @@ static int nmi_setup(void)
504 506
505 nmi_enabled = 0; 507 nmi_enabled = 0;
506 ctr_running = 0; 508 ctr_running = 0;
507 barrier(); 509 /* make variables visible to the nmi handler: */
510 smp_mb();
508 err = register_die_notifier(&profile_exceptions_nb); 511 err = register_die_notifier(&profile_exceptions_nb);
509 if (err) 512 if (err)
510 goto fail; 513 goto fail;
511 514
512 get_online_cpus(); 515 get_online_cpus();
513 register_cpu_notifier(&oprofile_cpu_nb); 516 register_cpu_notifier(&oprofile_cpu_nb);
514 on_each_cpu(nmi_cpu_setup, NULL, 1);
515 nmi_enabled = 1; 517 nmi_enabled = 1;
518 /* make nmi_enabled visible to the nmi handler: */
519 smp_mb();
520 on_each_cpu(nmi_cpu_setup, NULL, 1);
516 put_online_cpus(); 521 put_online_cpus();
517 522
518 return 0; 523 return 0;
@@ -531,7 +536,8 @@ static void nmi_shutdown(void)
531 nmi_enabled = 0; 536 nmi_enabled = 0;
532 ctr_running = 0; 537 ctr_running = 0;
533 put_online_cpus(); 538 put_online_cpus();
534 barrier(); 539 /* make variables visible to the nmi handler: */
540 smp_mb();
535 unregister_die_notifier(&profile_exceptions_nb); 541 unregister_die_notifier(&profile_exceptions_nb);
536 msrs = &get_cpu_var(cpu_msrs); 542 msrs = &get_cpu_var(cpu_msrs);
537 model->shutdown(msrs); 543 model->shutdown(msrs);
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index 750c346ef50a..301e325992f6 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -519,7 +519,8 @@ static int __init acpi_mcfg_check_entry(struct acpi_table_mcfg *mcfg,
519 if (cfg->address < 0xFFFFFFFF) 519 if (cfg->address < 0xFFFFFFFF)
520 return 0; 520 return 0;
521 521
522 if (!strcmp(mcfg->header.oem_id, "SGI")) 522 if (!strcmp(mcfg->header.oem_id, "SGI") ||
523 !strcmp(mcfg->header.oem_id, "SGI2"))
523 return 0; 524 return 0;
524 525
525 if (mcfg->header.revision >= 1) { 526 if (mcfg->header.revision >= 1) {
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index fe008309ffec..1017c7bee388 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -1,8 +1,13 @@
1/* 1/*
2 * Xen PCI Frontend Stub - puts some "dummy" functions in to the Linux 2 * Xen PCI - handle PCI (INTx) and MSI infrastructure calls for PV, HVM and
3 * x86 PCI core to support the Xen PCI Frontend 3 * initial domain support. We also handle the DSDT _PRT callbacks for GSI's
4 * used in HVM and initial domain mode (PV does not parse ACPI, so it has no
5 * concept of GSIs). Under PV we hook under the pnbbios API for IRQs and
6 * 0xcf8 PCI configuration read/write.
4 * 7 *
5 * Author: Ryan Wilson <hap9@epoch.ncsc.mil> 8 * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
9 * Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
10 * Stefano Stabellini <stefano.stabellini@eu.citrix.com>
6 */ 11 */
7#include <linux/module.h> 12#include <linux/module.h>
8#include <linux/init.h> 13#include <linux/init.h>
@@ -19,22 +24,53 @@
19#include <xen/events.h> 24#include <xen/events.h>
20#include <asm/xen/pci.h> 25#include <asm/xen/pci.h>
21 26
27static int xen_pcifront_enable_irq(struct pci_dev *dev)
28{
29 int rc;
30 int share = 1;
31 int pirq;
32 u8 gsi;
33
34 rc = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi);
35 if (rc < 0) {
36 dev_warn(&dev->dev, "Xen PCI: failed to read interrupt line: %d\n",
37 rc);
38 return rc;
39 }
40 /* In PV DomU the Xen PCI backend puts the PIRQ in the interrupt line.*/
41 pirq = gsi;
42
43 if (gsi < NR_IRQS_LEGACY)
44 share = 0;
45
46 rc = xen_bind_pirq_gsi_to_irq(gsi, pirq, share, "pcifront");
47 if (rc < 0) {
48 dev_warn(&dev->dev, "Xen PCI: failed to bind GSI%d (PIRQ%d) to IRQ: %d\n",
49 gsi, pirq, rc);
50 return rc;
51 }
52
53 dev->irq = rc;
54 dev_info(&dev->dev, "Xen PCI mapped GSI%d to IRQ%d\n", gsi, dev->irq);
55 return 0;
56}
57
22#ifdef CONFIG_ACPI 58#ifdef CONFIG_ACPI
23static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi, 59static int xen_register_pirq(u32 gsi, int gsi_override, int triggering,
24 int trigger, int polarity) 60 bool set_pirq)
25{ 61{
26 int rc, irq; 62 int rc, pirq = -1, irq = -1;
27 struct physdev_map_pirq map_irq; 63 struct physdev_map_pirq map_irq;
28 int shareable = 0; 64 int shareable = 0;
29 char *name; 65 char *name;
30 66
31 if (!xen_hvm_domain()) 67 if (set_pirq)
32 return -1; 68 pirq = gsi;
33 69
34 map_irq.domid = DOMID_SELF; 70 map_irq.domid = DOMID_SELF;
35 map_irq.type = MAP_PIRQ_TYPE_GSI; 71 map_irq.type = MAP_PIRQ_TYPE_GSI;
36 map_irq.index = gsi; 72 map_irq.index = gsi;
37 map_irq.pirq = -1; 73 map_irq.pirq = pirq;
38 74
39 rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq); 75 rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
40 if (rc) { 76 if (rc) {
@@ -42,7 +78,7 @@ static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi,
42 return -1; 78 return -1;
43 } 79 }
44 80
45 if (trigger == ACPI_EDGE_SENSITIVE) { 81 if (triggering == ACPI_EDGE_SENSITIVE) {
46 shareable = 0; 82 shareable = 0;
47 name = "ioapic-edge"; 83 name = "ioapic-edge";
48 } else { 84 } else {
@@ -50,12 +86,63 @@ static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi,
50 name = "ioapic-level"; 86 name = "ioapic-level";
51 } 87 }
52 88
89 if (gsi_override >= 0)
90 gsi = gsi_override;
91
53 irq = xen_bind_pirq_gsi_to_irq(gsi, map_irq.pirq, shareable, name); 92 irq = xen_bind_pirq_gsi_to_irq(gsi, map_irq.pirq, shareable, name);
93 if (irq < 0)
94 goto out;
54 95
55 printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq); 96 printk(KERN_DEBUG "xen: --> pirq=%d -> irq=%d (gsi=%d)\n", map_irq.pirq, irq, gsi);
97out:
98 return irq;
99}
100
101static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi,
102 int trigger, int polarity)
103{
104 if (!xen_hvm_domain())
105 return -1;
106
107 return xen_register_pirq(gsi, -1 /* no GSI override */, trigger,
108 false /* no mapping of GSI to PIRQ */);
109}
110
111#ifdef CONFIG_XEN_DOM0
112static int xen_register_gsi(u32 gsi, int gsi_override, int triggering, int polarity)
113{
114 int rc, irq;
115 struct physdev_setup_gsi setup_gsi;
116
117 if (!xen_pv_domain())
118 return -1;
119
120 printk(KERN_DEBUG "xen: registering gsi %u triggering %d polarity %d\n",
121 gsi, triggering, polarity);
122
123 irq = xen_register_pirq(gsi, gsi_override, triggering, true);
124
125 setup_gsi.gsi = gsi;
126 setup_gsi.triggering = (triggering == ACPI_EDGE_SENSITIVE ? 0 : 1);
127 setup_gsi.polarity = (polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
128
129 rc = HYPERVISOR_physdev_op(PHYSDEVOP_setup_gsi, &setup_gsi);
130 if (rc == -EEXIST)
131 printk(KERN_INFO "Already setup the GSI :%d\n", gsi);
132 else if (rc) {
133 printk(KERN_ERR "Failed to setup GSI :%d, err_code:%d\n",
134 gsi, rc);
135 }
56 136
57 return irq; 137 return irq;
58} 138}
139
140static int acpi_register_gsi_xen(struct device *dev, u32 gsi,
141 int trigger, int polarity)
142{
143 return xen_register_gsi(gsi, -1 /* no GSI override */, trigger, polarity);
144}
145#endif
59#endif 146#endif
60 147
61#if defined(CONFIG_PCI_MSI) 148#if defined(CONFIG_PCI_MSI)
@@ -65,6 +152,43 @@ static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi,
65struct xen_pci_frontend_ops *xen_pci_frontend; 152struct xen_pci_frontend_ops *xen_pci_frontend;
66EXPORT_SYMBOL_GPL(xen_pci_frontend); 153EXPORT_SYMBOL_GPL(xen_pci_frontend);
67 154
155static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
156{
157 int irq, ret, i;
158 struct msi_desc *msidesc;
159 int *v;
160
161 v = kzalloc(sizeof(int) * max(1, nvec), GFP_KERNEL);
162 if (!v)
163 return -ENOMEM;
164
165 if (type == PCI_CAP_ID_MSIX)
166 ret = xen_pci_frontend_enable_msix(dev, v, nvec);
167 else
168 ret = xen_pci_frontend_enable_msi(dev, v);
169 if (ret)
170 goto error;
171 i = 0;
172 list_for_each_entry(msidesc, &dev->msi_list, list) {
173 irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i], 0,
174 (type == PCI_CAP_ID_MSIX) ?
175 "pcifront-msi-x" :
176 "pcifront-msi",
177 DOMID_SELF);
178 if (irq < 0)
179 goto free;
180 i++;
181 }
182 kfree(v);
183 return 0;
184
185error:
186 dev_err(&dev->dev, "Xen PCI frontend has not registered MSI/MSI-X support!\n");
187free:
188 kfree(v);
189 return ret;
190}
191
68#define XEN_PIRQ_MSI_DATA (MSI_DATA_TRIGGER_EDGE | \ 192#define XEN_PIRQ_MSI_DATA (MSI_DATA_TRIGGER_EDGE | \
69 MSI_DATA_LEVEL_ASSERT | (3 << 8) | MSI_DATA_VECTOR(0)) 193 MSI_DATA_LEVEL_ASSERT | (3 << 8) | MSI_DATA_VECTOR(0))
70 194
@@ -123,67 +247,6 @@ error:
123 return -ENODEV; 247 return -ENODEV;
124} 248}
125 249
126/*
127 * For MSI interrupts we have to use drivers/xen/event.s functions to
128 * allocate an irq_desc and setup the right */
129
130
131static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
132{
133 int irq, ret, i;
134 struct msi_desc *msidesc;
135 int *v;
136
137 v = kzalloc(sizeof(int) * max(1, nvec), GFP_KERNEL);
138 if (!v)
139 return -ENOMEM;
140
141 if (type == PCI_CAP_ID_MSIX)
142 ret = xen_pci_frontend_enable_msix(dev, v, nvec);
143 else
144 ret = xen_pci_frontend_enable_msi(dev, v);
145 if (ret)
146 goto error;
147 i = 0;
148 list_for_each_entry(msidesc, &dev->msi_list, list) {
149 irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i], 0,
150 (type == PCI_CAP_ID_MSIX) ?
151 "pcifront-msi-x" :
152 "pcifront-msi",
153 DOMID_SELF);
154 if (irq < 0)
155 goto free;
156 i++;
157 }
158 kfree(v);
159 return 0;
160
161error:
162 dev_err(&dev->dev, "Xen PCI frontend has not registered MSI/MSI-X support!\n");
163free:
164 kfree(v);
165 return ret;
166}
167
168static void xen_teardown_msi_irqs(struct pci_dev *dev)
169{
170 struct msi_desc *msidesc;
171
172 msidesc = list_entry(dev->msi_list.next, struct msi_desc, list);
173 if (msidesc->msi_attrib.is_msix)
174 xen_pci_frontend_disable_msix(dev);
175 else
176 xen_pci_frontend_disable_msi(dev);
177
178 /* Free the IRQ's and the msidesc using the generic code. */
179 default_teardown_msi_irqs(dev);
180}
181
182static void xen_teardown_msi_irq(unsigned int irq)
183{
184 xen_destroy_irq(irq);
185}
186
187#ifdef CONFIG_XEN_DOM0 250#ifdef CONFIG_XEN_DOM0
188static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) 251static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
189{ 252{
@@ -242,45 +305,28 @@ out:
242 return ret; 305 return ret;
243} 306}
244#endif 307#endif
245#endif
246 308
247static int xen_pcifront_enable_irq(struct pci_dev *dev) 309static void xen_teardown_msi_irqs(struct pci_dev *dev)
248{ 310{
249 int rc; 311 struct msi_desc *msidesc;
250 int share = 1;
251 int pirq;
252 u8 gsi;
253
254 rc = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi);
255 if (rc < 0) {
256 dev_warn(&dev->dev, "Xen PCI: failed to read interrupt line: %d\n",
257 rc);
258 return rc;
259 }
260
261 rc = xen_allocate_pirq_gsi(gsi);
262 if (rc < 0) {
263 dev_warn(&dev->dev, "Xen PCI: failed to allocate a PIRQ for GSI%d: %d\n",
264 gsi, rc);
265 return rc;
266 }
267 pirq = rc;
268 312
269 if (gsi < NR_IRQS_LEGACY) 313 msidesc = list_entry(dev->msi_list.next, struct msi_desc, list);
270 share = 0; 314 if (msidesc->msi_attrib.is_msix)
315 xen_pci_frontend_disable_msix(dev);
316 else
317 xen_pci_frontend_disable_msi(dev);
271 318
272 rc = xen_bind_pirq_gsi_to_irq(gsi, pirq, share, "pcifront"); 319 /* Free the IRQ's and the msidesc using the generic code. */
273 if (rc < 0) { 320 default_teardown_msi_irqs(dev);
274 dev_warn(&dev->dev, "Xen PCI: failed to bind GSI%d (PIRQ%d) to IRQ: %d\n", 321}
275 gsi, pirq, rc);
276 return rc;
277 }
278 322
279 dev->irq = rc; 323static void xen_teardown_msi_irq(unsigned int irq)
280 dev_info(&dev->dev, "Xen PCI mapped GSI%d to IRQ%d\n", gsi, dev->irq); 324{
281 return 0; 325 xen_destroy_irq(irq);
282} 326}
283 327
328#endif
329
284int __init pci_xen_init(void) 330int __init pci_xen_init(void)
285{ 331{
286 if (!xen_pv_domain() || xen_initial_domain()) 332 if (!xen_pv_domain() || xen_initial_domain())
@@ -327,104 +373,13 @@ int __init pci_xen_hvm_init(void)
327} 373}
328 374
329#ifdef CONFIG_XEN_DOM0 375#ifdef CONFIG_XEN_DOM0
330static int xen_register_pirq(u32 gsi, int triggering)
331{
332 int rc, pirq, irq = -1;
333 struct physdev_map_pirq map_irq;
334 int shareable = 0;
335 char *name;
336 bool gsi_override = false;
337
338 if (!xen_pv_domain())
339 return -1;
340
341 if (triggering == ACPI_EDGE_SENSITIVE) {
342 shareable = 0;
343 name = "ioapic-edge";
344 } else {
345 shareable = 1;
346 name = "ioapic-level";
347 }
348
349 pirq = xen_allocate_pirq_gsi(gsi);
350 if (pirq < 0)
351 goto out;
352
353 /* Before we bind the GSI to a Linux IRQ, check whether
354 * we need to override it with bus_irq (IRQ) value. Usually for
355 * IRQs below IRQ_LEGACY_IRQ this holds IRQ == GSI, as so:
356 * ACPI: INT_SRC_OVR (bus 0 bus_irq 9 global_irq 9 low level)
357 * but there are oddballs where the IRQ != GSI:
358 * ACPI: INT_SRC_OVR (bus 0 bus_irq 9 global_irq 20 low level)
359 * which ends up being: gsi_to_irq[9] == 20
360 * (which is what acpi_gsi_to_irq ends up calling when starting the
361 * the ACPI interpreter and keels over since IRQ 9 has not been
362 * setup as we had setup IRQ 20 for it).
363 */
364 if (gsi == acpi_sci_override_gsi) {
365 /* Check whether the GSI != IRQ */
366 acpi_gsi_to_irq(gsi, &irq);
367 if (irq != gsi)
368 /* Bugger, we MUST have that IRQ. */
369 gsi_override = true;
370 }
371 if (gsi_override)
372 irq = xen_bind_pirq_gsi_to_irq(irq, pirq, shareable, name);
373 else
374 irq = xen_bind_pirq_gsi_to_irq(gsi, pirq, shareable, name);
375 if (irq < 0)
376 goto out;
377
378 printk(KERN_DEBUG "xen: --> pirq=%d -> irq=%d (gsi=%d)\n", pirq, irq, gsi);
379
380 map_irq.domid = DOMID_SELF;
381 map_irq.type = MAP_PIRQ_TYPE_GSI;
382 map_irq.index = gsi;
383 map_irq.pirq = pirq;
384
385 rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
386 if (rc) {
387 printk(KERN_WARNING "xen map irq failed %d\n", rc);
388 return -1;
389 }
390
391out:
392 return irq;
393}
394
395static int xen_register_gsi(u32 gsi, int triggering, int polarity)
396{
397 int rc, irq;
398 struct physdev_setup_gsi setup_gsi;
399
400 if (!xen_pv_domain())
401 return -1;
402
403 printk(KERN_DEBUG "xen: registering gsi %u triggering %d polarity %d\n",
404 gsi, triggering, polarity);
405
406 irq = xen_register_pirq(gsi, triggering);
407
408 setup_gsi.gsi = gsi;
409 setup_gsi.triggering = (triggering == ACPI_EDGE_SENSITIVE ? 0 : 1);
410 setup_gsi.polarity = (polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
411
412 rc = HYPERVISOR_physdev_op(PHYSDEVOP_setup_gsi, &setup_gsi);
413 if (rc == -EEXIST)
414 printk(KERN_INFO "Already setup the GSI :%d\n", gsi);
415 else if (rc) {
416 printk(KERN_ERR "Failed to setup GSI :%d, err_code:%d\n",
417 gsi, rc);
418 }
419
420 return irq;
421}
422
423static __init void xen_setup_acpi_sci(void) 376static __init void xen_setup_acpi_sci(void)
424{ 377{
425 int rc; 378 int rc;
426 int trigger, polarity; 379 int trigger, polarity;
427 int gsi = acpi_sci_override_gsi; 380 int gsi = acpi_sci_override_gsi;
381 int irq = -1;
382 int gsi_override = -1;
428 383
429 if (!gsi) 384 if (!gsi)
430 return; 385 return;
@@ -437,51 +392,43 @@ static __init void xen_setup_acpi_sci(void)
437 } 392 }
438 trigger = trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE; 393 trigger = trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
439 polarity = polarity ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH; 394 polarity = polarity ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
440 395
441 printk(KERN_INFO "xen: sci override: global_irq=%d trigger=%d " 396 printk(KERN_INFO "xen: sci override: global_irq=%d trigger=%d "
442 "polarity=%d\n", gsi, trigger, polarity); 397 "polarity=%d\n", gsi, trigger, polarity);
443 398
444 gsi = xen_register_gsi(gsi, trigger, polarity); 399 /* Before we bind the GSI to a Linux IRQ, check whether
400 * we need to override it with bus_irq (IRQ) value. Usually for
401 * IRQs below IRQ_LEGACY_IRQ this holds IRQ == GSI, as so:
402 * ACPI: INT_SRC_OVR (bus 0 bus_irq 9 global_irq 9 low level)
403 * but there are oddballs where the IRQ != GSI:
404 * ACPI: INT_SRC_OVR (bus 0 bus_irq 9 global_irq 20 low level)
405 * which ends up being: gsi_to_irq[9] == 20
406 * (which is what acpi_gsi_to_irq ends up calling when starting the
407 * the ACPI interpreter and keels over since IRQ 9 has not been
408 * setup as we had setup IRQ 20 for it).
409 */
410 if (acpi_gsi_to_irq(gsi, &irq) == 0) {
411 /* Use the provided value if it's valid. */
412 if (irq >= 0)
413 gsi_override = irq;
414 }
415
416 gsi = xen_register_gsi(gsi, gsi_override, trigger, polarity);
445 printk(KERN_INFO "xen: acpi sci %d\n", gsi); 417 printk(KERN_INFO "xen: acpi sci %d\n", gsi);
446 418
447 return; 419 return;
448} 420}
449 421
450static int acpi_register_gsi_xen(struct device *dev, u32 gsi, 422int __init pci_xen_initial_domain(void)
451 int trigger, int polarity)
452{ 423{
453 return xen_register_gsi(gsi, trigger, polarity); 424 int irq;
454}
455 425
456static int __init pci_xen_initial_domain(void)
457{
458#ifdef CONFIG_PCI_MSI 426#ifdef CONFIG_PCI_MSI
459 x86_msi.setup_msi_irqs = xen_initdom_setup_msi_irqs; 427 x86_msi.setup_msi_irqs = xen_initdom_setup_msi_irqs;
460 x86_msi.teardown_msi_irq = xen_teardown_msi_irq; 428 x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
461#endif 429#endif
462 xen_setup_acpi_sci(); 430 xen_setup_acpi_sci();
463 __acpi_register_gsi = acpi_register_gsi_xen; 431 __acpi_register_gsi = acpi_register_gsi_xen;
464
465 return 0;
466}
467
468void __init xen_setup_pirqs(void)
469{
470 int pirq, irq;
471
472 pci_xen_initial_domain();
473
474 if (0 == nr_ioapics) {
475 for (irq = 0; irq < NR_IRQS_LEGACY; irq++) {
476 pirq = xen_allocate_pirq_gsi(irq);
477 if (WARN(pirq < 0,
478 "Could not allocate PIRQ for legacy interrupt\n"))
479 break;
480 irq = xen_bind_pirq_gsi_to_irq(irq, pirq, 0, "xt-pic");
481 }
482 return;
483 }
484
485 /* Pre-allocate legacy irqs */ 432 /* Pre-allocate legacy irqs */
486 for (irq = 0; irq < NR_IRQS_LEGACY; irq++) { 433 for (irq = 0; irq < NR_IRQS_LEGACY; irq++) {
487 int trigger, polarity; 434 int trigger, polarity;
@@ -489,13 +436,17 @@ void __init xen_setup_pirqs(void)
489 if (acpi_get_override_irq(irq, &trigger, &polarity) == -1) 436 if (acpi_get_override_irq(irq, &trigger, &polarity) == -1)
490 continue; 437 continue;
491 438
492 xen_register_pirq(irq, 439 xen_register_pirq(irq, -1 /* no GSI override */,
493 trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE); 440 trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE,
441 true /* Map GSI to PIRQ */);
494 } 442 }
443 if (0 == nr_ioapics) {
444 for (irq = 0; irq < NR_IRQS_LEGACY; irq++)
445 xen_bind_pirq_gsi_to_irq(irq, irq, 0, "xt-pic");
446 }
447 return 0;
495} 448}
496#endif
497 449
498#ifdef CONFIG_XEN_DOM0
499struct xen_device_domain_owner { 450struct xen_device_domain_owner {
500 domid_t domain; 451 domid_t domain;
501 struct pci_dev *dev; 452 struct pci_dev *dev;
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 474356b98ede..3ae4128013e6 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -51,7 +51,17 @@
51int efi_enabled; 51int efi_enabled;
52EXPORT_SYMBOL(efi_enabled); 52EXPORT_SYMBOL(efi_enabled);
53 53
54struct efi efi; 54struct efi __read_mostly efi = {
55 .mps = EFI_INVALID_TABLE_ADDR,
56 .acpi = EFI_INVALID_TABLE_ADDR,
57 .acpi20 = EFI_INVALID_TABLE_ADDR,
58 .smbios = EFI_INVALID_TABLE_ADDR,
59 .sal_systab = EFI_INVALID_TABLE_ADDR,
60 .boot_info = EFI_INVALID_TABLE_ADDR,
61 .hcdp = EFI_INVALID_TABLE_ADDR,
62 .uga = EFI_INVALID_TABLE_ADDR,
63 .uv_systab = EFI_INVALID_TABLE_ADDR,
64};
55EXPORT_SYMBOL(efi); 65EXPORT_SYMBOL(efi);
56 66
57struct efi_memory_map memmap; 67struct efi_memory_map memmap;
@@ -79,26 +89,50 @@ early_param("add_efi_memmap", setup_add_efi_memmap);
79 89
80static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc) 90static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
81{ 91{
82 return efi_call_virt2(get_time, tm, tc); 92 unsigned long flags;
93 efi_status_t status;
94
95 spin_lock_irqsave(&rtc_lock, flags);
96 status = efi_call_virt2(get_time, tm, tc);
97 spin_unlock_irqrestore(&rtc_lock, flags);
98 return status;
83} 99}
84 100
85static efi_status_t virt_efi_set_time(efi_time_t *tm) 101static efi_status_t virt_efi_set_time(efi_time_t *tm)
86{ 102{
87 return efi_call_virt1(set_time, tm); 103 unsigned long flags;
104 efi_status_t status;
105
106 spin_lock_irqsave(&rtc_lock, flags);
107 status = efi_call_virt1(set_time, tm);
108 spin_unlock_irqrestore(&rtc_lock, flags);
109 return status;
88} 110}
89 111
90static efi_status_t virt_efi_get_wakeup_time(efi_bool_t *enabled, 112static efi_status_t virt_efi_get_wakeup_time(efi_bool_t *enabled,
91 efi_bool_t *pending, 113 efi_bool_t *pending,
92 efi_time_t *tm) 114 efi_time_t *tm)
93{ 115{
94 return efi_call_virt3(get_wakeup_time, 116 unsigned long flags;
95 enabled, pending, tm); 117 efi_status_t status;
118
119 spin_lock_irqsave(&rtc_lock, flags);
120 status = efi_call_virt3(get_wakeup_time,
121 enabled, pending, tm);
122 spin_unlock_irqrestore(&rtc_lock, flags);
123 return status;
96} 124}
97 125
98static efi_status_t virt_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm) 126static efi_status_t virt_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
99{ 127{
100 return efi_call_virt2(set_wakeup_time, 128 unsigned long flags;
101 enabled, tm); 129 efi_status_t status;
130
131 spin_lock_irqsave(&rtc_lock, flags);
132 status = efi_call_virt2(set_wakeup_time,
133 enabled, tm);
134 spin_unlock_irqrestore(&rtc_lock, flags);
135 return status;
102} 136}
103 137
104static efi_status_t virt_efi_get_variable(efi_char16_t *name, 138static efi_status_t virt_efi_get_variable(efi_char16_t *name,
@@ -122,7 +156,7 @@ static efi_status_t virt_efi_get_next_variable(unsigned long *name_size,
122 156
123static efi_status_t virt_efi_set_variable(efi_char16_t *name, 157static efi_status_t virt_efi_set_variable(efi_char16_t *name,
124 efi_guid_t *vendor, 158 efi_guid_t *vendor,
125 unsigned long attr, 159 u32 attr,
126 unsigned long data_size, 160 unsigned long data_size,
127 void *data) 161 void *data)
128{ 162{
@@ -131,6 +165,18 @@ static efi_status_t virt_efi_set_variable(efi_char16_t *name,
131 data_size, data); 165 data_size, data);
132} 166}
133 167
168static efi_status_t virt_efi_query_variable_info(u32 attr,
169 u64 *storage_space,
170 u64 *remaining_space,
171 u64 *max_variable_size)
172{
173 if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
174 return EFI_UNSUPPORTED;
175
176 return efi_call_virt4(query_variable_info, attr, storage_space,
177 remaining_space, max_variable_size);
178}
179
134static efi_status_t virt_efi_get_next_high_mono_count(u32 *count) 180static efi_status_t virt_efi_get_next_high_mono_count(u32 *count)
135{ 181{
136 return efi_call_virt1(get_next_high_mono_count, count); 182 return efi_call_virt1(get_next_high_mono_count, count);
@@ -145,6 +191,28 @@ static void virt_efi_reset_system(int reset_type,
145 data_size, data); 191 data_size, data);
146} 192}
147 193
194static efi_status_t virt_efi_update_capsule(efi_capsule_header_t **capsules,
195 unsigned long count,
196 unsigned long sg_list)
197{
198 if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
199 return EFI_UNSUPPORTED;
200
201 return efi_call_virt3(update_capsule, capsules, count, sg_list);
202}
203
204static efi_status_t virt_efi_query_capsule_caps(efi_capsule_header_t **capsules,
205 unsigned long count,
206 u64 *max_size,
207 int *reset_type)
208{
209 if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
210 return EFI_UNSUPPORTED;
211
212 return efi_call_virt4(query_capsule_caps, capsules, count, max_size,
213 reset_type);
214}
215
148static efi_status_t __init phys_efi_set_virtual_address_map( 216static efi_status_t __init phys_efi_set_virtual_address_map(
149 unsigned long memory_map_size, 217 unsigned long memory_map_size,
150 unsigned long descriptor_size, 218 unsigned long descriptor_size,
@@ -164,11 +232,14 @@ static efi_status_t __init phys_efi_set_virtual_address_map(
164static efi_status_t __init phys_efi_get_time(efi_time_t *tm, 232static efi_status_t __init phys_efi_get_time(efi_time_t *tm,
165 efi_time_cap_t *tc) 233 efi_time_cap_t *tc)
166{ 234{
235 unsigned long flags;
167 efi_status_t status; 236 efi_status_t status;
168 237
238 spin_lock_irqsave(&rtc_lock, flags);
169 efi_call_phys_prelog(); 239 efi_call_phys_prelog();
170 status = efi_call_phys2(efi_phys.get_time, tm, tc); 240 status = efi_call_phys2(efi_phys.get_time, tm, tc);
171 efi_call_phys_epilog(); 241 efi_call_phys_epilog();
242 spin_unlock_irqrestore(&rtc_lock, flags);
172 return status; 243 return status;
173} 244}
174 245
@@ -504,9 +575,6 @@ void __init efi_init(void)
504 x86_platform.set_wallclock = efi_set_rtc_mmss; 575 x86_platform.set_wallclock = efi_set_rtc_mmss;
505#endif 576#endif
506 577
507 /* Setup for EFI runtime service */
508 reboot_type = BOOT_EFI;
509
510#if EFI_DEBUG 578#if EFI_DEBUG
511 print_efi_memmap(); 579 print_efi_memmap();
512#endif 580#endif
@@ -672,6 +740,9 @@ void __init efi_enter_virtual_mode(void)
672 efi.get_next_high_mono_count = virt_efi_get_next_high_mono_count; 740 efi.get_next_high_mono_count = virt_efi_get_next_high_mono_count;
673 efi.reset_system = virt_efi_reset_system; 741 efi.reset_system = virt_efi_reset_system;
674 efi.set_virtual_address_map = NULL; 742 efi.set_virtual_address_map = NULL;
743 efi.query_variable_info = virt_efi_query_variable_info;
744 efi.update_capsule = virt_efi_update_capsule;
745 efi.query_capsule_caps = virt_efi_query_capsule_caps;
675 if (__supported_pte_mask & _PAGE_NX) 746 if (__supported_pte_mask & _PAGE_NX)
676 runtime_code_page_mkexec(); 747 runtime_code_page_mkexec();
677 early_iounmap(memmap.map, memmap.nr_map * memmap.desc_size); 748 early_iounmap(memmap.map, memmap.nr_map * memmap.desc_size);
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 68e467f69fec..db8b915f54bc 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -296,14 +296,18 @@ static void bau_process_message(struct msg_desc *mdp,
296} 296}
297 297
298/* 298/*
299 * Determine the first cpu on a uvhub. 299 * Determine the first cpu on a pnode.
300 */ 300 */
301static int uvhub_to_first_cpu(int uvhub) 301static int pnode_to_first_cpu(int pnode, struct bau_control *smaster)
302{ 302{
303 int cpu; 303 int cpu;
304 for_each_present_cpu(cpu) 304 struct hub_and_pnode *hpp;
305 if (uvhub == uv_cpu_to_blade_id(cpu)) 305
306 for_each_present_cpu(cpu) {
307 hpp = &smaster->thp[cpu];
308 if (pnode == hpp->pnode)
306 return cpu; 309 return cpu;
310 }
307 return -1; 311 return -1;
308} 312}
309 313
@@ -366,28 +370,32 @@ static void do_reset(void *ptr)
366 * Use IPI to get all target uvhubs to release resources held by 370 * Use IPI to get all target uvhubs to release resources held by
367 * a given sending cpu number. 371 * a given sending cpu number.
368 */ 372 */
369static void reset_with_ipi(struct bau_targ_hubmask *distribution, int sender) 373static void reset_with_ipi(struct pnmask *distribution, struct bau_control *bcp)
370{ 374{
371 int uvhub; 375 int pnode;
376 int apnode;
372 int maskbits; 377 int maskbits;
373 cpumask_t mask; 378 int sender = bcp->cpu;
379 cpumask_t *mask = bcp->uvhub_master->cpumask;
380 struct bau_control *smaster = bcp->socket_master;
374 struct reset_args reset_args; 381 struct reset_args reset_args;
375 382
376 reset_args.sender = sender; 383 reset_args.sender = sender;
377 cpus_clear(mask); 384 cpus_clear(*mask);
378 /* find a single cpu for each uvhub in this distribution mask */ 385 /* find a single cpu for each uvhub in this distribution mask */
379 maskbits = sizeof(struct bau_targ_hubmask) * BITSPERBYTE; 386 maskbits = sizeof(struct pnmask) * BITSPERBYTE;
380 for (uvhub = 0; uvhub < maskbits; uvhub++) { 387 /* each bit is a pnode relative to the partition base pnode */
388 for (pnode = 0; pnode < maskbits; pnode++) {
381 int cpu; 389 int cpu;
382 if (!bau_uvhub_isset(uvhub, distribution)) 390 if (!bau_uvhub_isset(pnode, distribution))
383 continue; 391 continue;
384 /* find a cpu for this uvhub */ 392 apnode = pnode + bcp->partition_base_pnode;
385 cpu = uvhub_to_first_cpu(uvhub); 393 cpu = pnode_to_first_cpu(apnode, smaster);
386 cpu_set(cpu, mask); 394 cpu_set(cpu, *mask);
387 } 395 }
388 396
389 /* IPI all cpus; preemption is already disabled */ 397 /* IPI all cpus; preemption is already disabled */
390 smp_call_function_many(&mask, do_reset, (void *)&reset_args, 1); 398 smp_call_function_many(mask, do_reset, (void *)&reset_args, 1);
391 return; 399 return;
392} 400}
393 401
@@ -604,7 +612,7 @@ static void destination_plugged(struct bau_desc *bau_desc,
604 quiesce_local_uvhub(hmaster); 612 quiesce_local_uvhub(hmaster);
605 613
606 spin_lock(&hmaster->queue_lock); 614 spin_lock(&hmaster->queue_lock);
607 reset_with_ipi(&bau_desc->distribution, bcp->cpu); 615 reset_with_ipi(&bau_desc->distribution, bcp);
608 spin_unlock(&hmaster->queue_lock); 616 spin_unlock(&hmaster->queue_lock);
609 617
610 end_uvhub_quiesce(hmaster); 618 end_uvhub_quiesce(hmaster);
@@ -626,7 +634,7 @@ static void destination_timeout(struct bau_desc *bau_desc,
626 quiesce_local_uvhub(hmaster); 634 quiesce_local_uvhub(hmaster);
627 635
628 spin_lock(&hmaster->queue_lock); 636 spin_lock(&hmaster->queue_lock);
629 reset_with_ipi(&bau_desc->distribution, bcp->cpu); 637 reset_with_ipi(&bau_desc->distribution, bcp);
630 spin_unlock(&hmaster->queue_lock); 638 spin_unlock(&hmaster->queue_lock);
631 639
632 end_uvhub_quiesce(hmaster); 640 end_uvhub_quiesce(hmaster);
@@ -1334,9 +1342,10 @@ static ssize_t tunables_write(struct file *file, const char __user *user,
1334 1342
1335 instr[count] = '\0'; 1343 instr[count] = '\0';
1336 1344
1337 bcp = &per_cpu(bau_control, smp_processor_id()); 1345 cpu = get_cpu();
1338 1346 bcp = &per_cpu(bau_control, cpu);
1339 ret = parse_tunables_write(bcp, instr, count); 1347 ret = parse_tunables_write(bcp, instr, count);
1348 put_cpu();
1340 if (ret) 1349 if (ret)
1341 return ret; 1350 return ret;
1342 1351
@@ -1687,6 +1696,16 @@ static void make_per_cpu_thp(struct bau_control *smaster)
1687} 1696}
1688 1697
1689/* 1698/*
1699 * Each uvhub is to get a local cpumask.
1700 */
1701static void make_per_hub_cpumask(struct bau_control *hmaster)
1702{
1703 int sz = sizeof(cpumask_t);
1704
1705 hmaster->cpumask = kzalloc_node(sz, GFP_KERNEL, hmaster->osnode);
1706}
1707
1708/*
1690 * Initialize all the per_cpu information for the cpu's on a given socket, 1709 * Initialize all the per_cpu information for the cpu's on a given socket,
1691 * given what has been gathered into the socket_desc struct. 1710 * given what has been gathered into the socket_desc struct.
1692 * And reports the chosen hub and socket masters back to the caller. 1711 * And reports the chosen hub and socket masters back to the caller.
@@ -1751,11 +1770,12 @@ static int __init summarize_uvhub_sockets(int nuvhubs,
1751 sdp = &bdp->socket[socket]; 1770 sdp = &bdp->socket[socket];
1752 if (scan_sock(sdp, bdp, &smaster, &hmaster)) 1771 if (scan_sock(sdp, bdp, &smaster, &hmaster))
1753 return 1; 1772 return 1;
1773 make_per_cpu_thp(smaster);
1754 } 1774 }
1755 socket++; 1775 socket++;
1756 socket_mask = (socket_mask >> 1); 1776 socket_mask = (socket_mask >> 1);
1757 make_per_cpu_thp(smaster);
1758 } 1777 }
1778 make_per_hub_cpumask(hmaster);
1759 } 1779 }
1760 return 0; 1780 return 0;
1761} 1781}
@@ -1777,15 +1797,20 @@ static int __init init_per_cpu(int nuvhubs, int base_part_pnode)
1777 uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL); 1797 uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL);
1778 1798
1779 if (get_cpu_topology(base_part_pnode, uvhub_descs, uvhub_mask)) 1799 if (get_cpu_topology(base_part_pnode, uvhub_descs, uvhub_mask))
1780 return 1; 1800 goto fail;
1781 1801
1782 if (summarize_uvhub_sockets(nuvhubs, uvhub_descs, uvhub_mask)) 1802 if (summarize_uvhub_sockets(nuvhubs, uvhub_descs, uvhub_mask))
1783 return 1; 1803 goto fail;
1784 1804
1785 kfree(uvhub_descs); 1805 kfree(uvhub_descs);
1786 kfree(uvhub_mask); 1806 kfree(uvhub_mask);
1787 init_per_cpu_tunables(); 1807 init_per_cpu_tunables();
1788 return 0; 1808 return 0;
1809
1810fail:
1811 kfree(uvhub_descs);
1812 kfree(uvhub_mask);
1813 return 1;
1789} 1814}
1790 1815
1791/* 1816/*
diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
index bef0bc962400..5d179502a52c 100644
--- a/arch/x86/vdso/Makefile
+++ b/arch/x86/vdso/Makefile
@@ -26,6 +26,7 @@ targets += vdso.so vdso.so.dbg vdso.lds $(vobjs-y)
26export CPPFLAGS_vdso.lds += -P -C 26export CPPFLAGS_vdso.lds += -P -C
27 27
28VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \ 28VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \
29 -Wl,--no-undefined \
29 -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096 30 -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096
30 31
31$(obj)/vdso.o: $(src)/vdso.S $(obj)/vdso.so 32$(obj)/vdso.o: $(src)/vdso.S $(obj)/vdso.so
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
index a724905fdae7..6bc0e723b6e8 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -6,7 +6,6 @@
6 * 6 *
7 * The code should have no internal unresolved relocations. 7 * The code should have no internal unresolved relocations.
8 * Check with readelf after changing. 8 * Check with readelf after changing.
9 * Also alternative() doesn't work.
10 */ 9 */
11 10
12/* Disable profiling for userspace code: */ 11/* Disable profiling for userspace code: */
@@ -17,6 +16,7 @@
17#include <linux/time.h> 16#include <linux/time.h>
18#include <linux/string.h> 17#include <linux/string.h>
19#include <asm/vsyscall.h> 18#include <asm/vsyscall.h>
19#include <asm/fixmap.h>
20#include <asm/vgtod.h> 20#include <asm/vgtod.h>
21#include <asm/timex.h> 21#include <asm/timex.h>
22#include <asm/hpet.h> 22#include <asm/hpet.h>
@@ -25,6 +25,43 @@
25 25
26#define gtod (&VVAR(vsyscall_gtod_data)) 26#define gtod (&VVAR(vsyscall_gtod_data))
27 27
28notrace static cycle_t vread_tsc(void)
29{
30 cycle_t ret;
31 u64 last;
32
33 /*
34 * Empirically, a fence (of type that depends on the CPU)
35 * before rdtsc is enough to ensure that rdtsc is ordered
36 * with respect to loads. The various CPU manuals are unclear
37 * as to whether rdtsc can be reordered with later loads,
38 * but no one has ever seen it happen.
39 */
40 rdtsc_barrier();
41 ret = (cycle_t)vget_cycles();
42
43 last = VVAR(vsyscall_gtod_data).clock.cycle_last;
44
45 if (likely(ret >= last))
46 return ret;
47
48 /*
49 * GCC likes to generate cmov here, but this branch is extremely
50 * predictable (it's just a funciton of time and the likely is
51 * very likely) and there's a data dependence, so force GCC
52 * to generate a branch instead. I don't barrier() because
53 * we don't actually need a barrier, and if this function
54 * ever gets inlined it will generate worse code.
55 */
56 asm volatile ("");
57 return last;
58}
59
60static notrace cycle_t vread_hpet(void)
61{
62 return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
63}
64
28notrace static long vdso_fallback_gettime(long clock, struct timespec *ts) 65notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
29{ 66{
30 long ret; 67 long ret;
@@ -36,9 +73,12 @@ notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
36notrace static inline long vgetns(void) 73notrace static inline long vgetns(void)
37{ 74{
38 long v; 75 long v;
39 cycles_t (*vread)(void); 76 cycles_t cycles;
40 vread = gtod->clock.vread; 77 if (gtod->clock.vclock_mode == VCLOCK_TSC)
41 v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask; 78 cycles = vread_tsc();
79 else
80 cycles = vread_hpet();
81 v = (cycles - gtod->clock.cycle_last) & gtod->clock.mask;
42 return (v * gtod->clock.mult) >> gtod->clock.shift; 82 return (v * gtod->clock.mult) >> gtod->clock.shift;
43} 83}
44 84
@@ -116,21 +156,21 @@ notrace static noinline int do_monotonic_coarse(struct timespec *ts)
116 156
117notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts) 157notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
118{ 158{
119 if (likely(gtod->sysctl_enabled)) 159 switch (clock) {
120 switch (clock) { 160 case CLOCK_REALTIME:
121 case CLOCK_REALTIME: 161 if (likely(gtod->clock.vclock_mode != VCLOCK_NONE))
122 if (likely(gtod->clock.vread)) 162 return do_realtime(ts);
123 return do_realtime(ts); 163 break;
124 break; 164 case CLOCK_MONOTONIC:
125 case CLOCK_MONOTONIC: 165 if (likely(gtod->clock.vclock_mode != VCLOCK_NONE))
126 if (likely(gtod->clock.vread)) 166 return do_monotonic(ts);
127 return do_monotonic(ts); 167 break;
128 break; 168 case CLOCK_REALTIME_COARSE:
129 case CLOCK_REALTIME_COARSE: 169 return do_realtime_coarse(ts);
130 return do_realtime_coarse(ts); 170 case CLOCK_MONOTONIC_COARSE:
131 case CLOCK_MONOTONIC_COARSE: 171 return do_monotonic_coarse(ts);
132 return do_monotonic_coarse(ts); 172 }
133 } 173
134 return vdso_fallback_gettime(clock, ts); 174 return vdso_fallback_gettime(clock, ts);
135} 175}
136int clock_gettime(clockid_t, struct timespec *) 176int clock_gettime(clockid_t, struct timespec *)
@@ -139,7 +179,7 @@ int clock_gettime(clockid_t, struct timespec *)
139notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz) 179notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
140{ 180{
141 long ret; 181 long ret;
142 if (likely(gtod->sysctl_enabled && gtod->clock.vread)) { 182 if (likely(gtod->clock.vclock_mode != VCLOCK_NONE)) {
143 if (likely(tv != NULL)) { 183 if (likely(tv != NULL)) {
144 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) != 184 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
145 offsetof(struct timespec, tv_nsec) || 185 offsetof(struct timespec, tv_nsec) ||
@@ -161,27 +201,14 @@ notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
161int gettimeofday(struct timeval *, struct timezone *) 201int gettimeofday(struct timeval *, struct timezone *)
162 __attribute__((weak, alias("__vdso_gettimeofday"))); 202 __attribute__((weak, alias("__vdso_gettimeofday")));
163 203
164/* This will break when the xtime seconds get inaccurate, but that is 204/*
165 * unlikely */ 205 * This will break when the xtime seconds get inaccurate, but that is
166 206 * unlikely
167static __always_inline long time_syscall(long *t) 207 */
168{
169 long secs;
170 asm volatile("syscall"
171 : "=a" (secs)
172 : "0" (__NR_time), "D" (t) : "cc", "r11", "cx", "memory");
173 return secs;
174}
175
176notrace time_t __vdso_time(time_t *t) 208notrace time_t __vdso_time(time_t *t)
177{ 209{
178 time_t result;
179
180 if (unlikely(!VVAR(vsyscall_gtod_data).sysctl_enabled))
181 return time_syscall(t);
182
183 /* This is atomic on x86_64 so we don't need any locks. */ 210 /* This is atomic on x86_64 so we don't need any locks. */
184 result = ACCESS_ONCE(VVAR(vsyscall_gtod_data).wall_time_sec); 211 time_t result = ACCESS_ONCE(VVAR(vsyscall_gtod_data).wall_time_sec);
185 212
186 if (t) 213 if (t)
187 *t = result; 214 *t = result;
diff --git a/arch/x86/vdso/vdso.S b/arch/x86/vdso/vdso.S
index 1d3aa6b87181..1b979c12ba85 100644
--- a/arch/x86/vdso/vdso.S
+++ b/arch/x86/vdso/vdso.S
@@ -1,10 +1,21 @@
1#include <asm/page_types.h>
2#include <linux/linkage.h>
1#include <linux/init.h> 3#include <linux/init.h>
2 4
3__INITDATA 5__PAGE_ALIGNED_DATA
4 6
5 .globl vdso_start, vdso_end 7 .globl vdso_start, vdso_end
8 .align PAGE_SIZE
6vdso_start: 9vdso_start:
7 .incbin "arch/x86/vdso/vdso.so" 10 .incbin "arch/x86/vdso/vdso.so"
8vdso_end: 11vdso_end:
9 12
10__FINIT 13.previous
14
15 .globl vdso_pages
16 .bss
17 .align 8
18 .type vdso_pages, @object
19vdso_pages:
20 .zero (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE * 8
21 .size vdso_pages, .-vdso_pages
diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
index 7abd2be0f9b9..316fbca3490e 100644
--- a/arch/x86/vdso/vma.c
+++ b/arch/x86/vdso/vma.c
@@ -14,41 +14,61 @@
14#include <asm/vgtod.h> 14#include <asm/vgtod.h>
15#include <asm/proto.h> 15#include <asm/proto.h>
16#include <asm/vdso.h> 16#include <asm/vdso.h>
17#include <asm/page.h>
17 18
18unsigned int __read_mostly vdso_enabled = 1; 19unsigned int __read_mostly vdso_enabled = 1;
19 20
20extern char vdso_start[], vdso_end[]; 21extern char vdso_start[], vdso_end[];
21extern unsigned short vdso_sync_cpuid; 22extern unsigned short vdso_sync_cpuid;
22 23
23static struct page **vdso_pages; 24extern struct page *vdso_pages[];
24static unsigned vdso_size; 25static unsigned vdso_size;
25 26
26static int __init init_vdso_vars(void) 27static void __init patch_vdso(void *vdso, size_t len)
28{
29 Elf64_Ehdr *hdr = vdso;
30 Elf64_Shdr *sechdrs, *alt_sec = 0;
31 char *secstrings;
32 void *alt_data;
33 int i;
34
35 BUG_ON(len < sizeof(Elf64_Ehdr));
36 BUG_ON(memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0);
37
38 sechdrs = (void *)hdr + hdr->e_shoff;
39 secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
40
41 for (i = 1; i < hdr->e_shnum; i++) {
42 Elf64_Shdr *shdr = &sechdrs[i];
43 if (!strcmp(secstrings + shdr->sh_name, ".altinstructions")) {
44 alt_sec = shdr;
45 goto found;
46 }
47 }
48
49 /* If we get here, it's probably a bug. */
50 pr_warning("patch_vdso: .altinstructions not found\n");
51 return; /* nothing to patch */
52
53found:
54 alt_data = (void *)hdr + alt_sec->sh_offset;
55 apply_alternatives(alt_data, alt_data + alt_sec->sh_size);
56}
57
58static int __init init_vdso(void)
27{ 59{
28 int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE; 60 int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
29 int i; 61 int i;
30 62
63 patch_vdso(vdso_start, vdso_end - vdso_start);
64
31 vdso_size = npages << PAGE_SHIFT; 65 vdso_size = npages << PAGE_SHIFT;
32 vdso_pages = kmalloc(sizeof(struct page *) * npages, GFP_KERNEL); 66 for (i = 0; i < npages; i++)
33 if (!vdso_pages) 67 vdso_pages[i] = virt_to_page(vdso_start + i*PAGE_SIZE);
34 goto oom;
35 for (i = 0; i < npages; i++) {
36 struct page *p;
37 p = alloc_page(GFP_KERNEL);
38 if (!p)
39 goto oom;
40 vdso_pages[i] = p;
41 copy_page(page_address(p), vdso_start + i*PAGE_SIZE);
42 }
43 68
44 return 0; 69 return 0;
45
46 oom:
47 printk("Cannot allocate vdso\n");
48 vdso_enabled = 0;
49 return -ENOMEM;
50} 70}
51subsys_initcall(init_vdso_vars); 71subsys_initcall(init_vdso);
52 72
53struct linux_binprm; 73struct linux_binprm;
54 74
diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
index 17c565de3d64..ccf73b2f3e69 100644
--- a/arch/x86/xen/Makefile
+++ b/arch/x86/xen/Makefile
@@ -13,10 +13,10 @@ CFLAGS_mmu.o := $(nostackp)
13obj-y := enlighten.o setup.o multicalls.o mmu.o irq.o \ 13obj-y := enlighten.o setup.o multicalls.o mmu.o irq.o \
14 time.o xen-asm.o xen-asm_$(BITS).o \ 14 time.o xen-asm.o xen-asm_$(BITS).o \
15 grant-table.o suspend.o platform-pci-unplug.o \ 15 grant-table.o suspend.o platform-pci-unplug.o \
16 p2m.o 16 p2m.o trace.o
17 17
18obj-$(CONFIG_SMP) += smp.o 18obj-$(CONFIG_SMP) += smp.o
19obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o 19obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o
20obj-$(CONFIG_XEN_DEBUG_FS) += debugfs.o 20obj-$(CONFIG_XEN_DEBUG_FS) += debugfs.o
21 21obj-$(CONFIG_XEN_DOM0) += vga.o
22obj-$(CONFIG_SWIOTLB_XEN) += pci-swiotlb-xen.o 22obj-$(CONFIG_SWIOTLB_XEN) += pci-swiotlb-xen.o
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 5525163a0398..974a528458a0 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -341,6 +341,8 @@ static void xen_set_ldt(const void *addr, unsigned entries)
341 struct mmuext_op *op; 341 struct mmuext_op *op;
342 struct multicall_space mcs = xen_mc_entry(sizeof(*op)); 342 struct multicall_space mcs = xen_mc_entry(sizeof(*op));
343 343
344 trace_xen_cpu_set_ldt(addr, entries);
345
344 op = mcs.args; 346 op = mcs.args;
345 op->cmd = MMUEXT_SET_LDT; 347 op->cmd = MMUEXT_SET_LDT;
346 op->arg1.linear_addr = (unsigned long)addr; 348 op->arg1.linear_addr = (unsigned long)addr;
@@ -496,6 +498,8 @@ static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum,
496 xmaddr_t mach_lp = arbitrary_virt_to_machine(&dt[entrynum]); 498 xmaddr_t mach_lp = arbitrary_virt_to_machine(&dt[entrynum]);
497 u64 entry = *(u64 *)ptr; 499 u64 entry = *(u64 *)ptr;
498 500
501 trace_xen_cpu_write_ldt_entry(dt, entrynum, entry);
502
499 preempt_disable(); 503 preempt_disable();
500 504
501 xen_mc_flush(); 505 xen_mc_flush();
@@ -565,6 +569,8 @@ static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
565 unsigned long p = (unsigned long)&dt[entrynum]; 569 unsigned long p = (unsigned long)&dt[entrynum];
566 unsigned long start, end; 570 unsigned long start, end;
567 571
572 trace_xen_cpu_write_idt_entry(dt, entrynum, g);
573
568 preempt_disable(); 574 preempt_disable();
569 575
570 start = __this_cpu_read(idt_desc.address); 576 start = __this_cpu_read(idt_desc.address);
@@ -619,6 +625,8 @@ static void xen_load_idt(const struct desc_ptr *desc)
619 static DEFINE_SPINLOCK(lock); 625 static DEFINE_SPINLOCK(lock);
620 static struct trap_info traps[257]; 626 static struct trap_info traps[257];
621 627
628 trace_xen_cpu_load_idt(desc);
629
622 spin_lock(&lock); 630 spin_lock(&lock);
623 631
624 __get_cpu_var(idt_desc) = *desc; 632 __get_cpu_var(idt_desc) = *desc;
@@ -637,6 +645,8 @@ static void xen_load_idt(const struct desc_ptr *desc)
637static void xen_write_gdt_entry(struct desc_struct *dt, int entry, 645static void xen_write_gdt_entry(struct desc_struct *dt, int entry,
638 const void *desc, int type) 646 const void *desc, int type)
639{ 647{
648 trace_xen_cpu_write_gdt_entry(dt, entry, desc, type);
649
640 preempt_disable(); 650 preempt_disable();
641 651
642 switch (type) { 652 switch (type) {
@@ -665,6 +675,8 @@ static void xen_write_gdt_entry(struct desc_struct *dt, int entry,
665static void __init xen_write_gdt_entry_boot(struct desc_struct *dt, int entry, 675static void __init xen_write_gdt_entry_boot(struct desc_struct *dt, int entry,
666 const void *desc, int type) 676 const void *desc, int type)
667{ 677{
678 trace_xen_cpu_write_gdt_entry(dt, entry, desc, type);
679
668 switch (type) { 680 switch (type) {
669 case DESC_LDT: 681 case DESC_LDT:
670 case DESC_TSS: 682 case DESC_TSS:
@@ -684,7 +696,9 @@ static void __init xen_write_gdt_entry_boot(struct desc_struct *dt, int entry,
684static void xen_load_sp0(struct tss_struct *tss, 696static void xen_load_sp0(struct tss_struct *tss,
685 struct thread_struct *thread) 697 struct thread_struct *thread)
686{ 698{
687 struct multicall_space mcs = xen_mc_entry(0); 699 struct multicall_space mcs;
700
701 mcs = xen_mc_entry(0);
688 MULTI_stack_switch(mcs.mc, __KERNEL_DS, thread->sp0); 702 MULTI_stack_switch(mcs.mc, __KERNEL_DS, thread->sp0);
689 xen_mc_issue(PARAVIRT_LAZY_CPU); 703 xen_mc_issue(PARAVIRT_LAZY_CPU);
690} 704}
@@ -1248,6 +1262,14 @@ asmlinkage void __init xen_start_kernel(void)
1248 if (pci_xen) 1262 if (pci_xen)
1249 x86_init.pci.arch_init = pci_xen_init; 1263 x86_init.pci.arch_init = pci_xen_init;
1250 } else { 1264 } else {
1265 const struct dom0_vga_console_info *info =
1266 (void *)((char *)xen_start_info +
1267 xen_start_info->console.dom0.info_off);
1268
1269 xen_init_vga(info, xen_start_info->console.dom0.info_size);
1270 xen_start_info->console.domU.mfn = 0;
1271 xen_start_info->console.domU.evtchn = 0;
1272
1251 /* Make sure ACS will be enabled */ 1273 /* Make sure ACS will be enabled */
1252 pci_request_acs(); 1274 pci_request_acs();
1253 } 1275 }
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 0ccccb67a993..f987bde77c49 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -48,6 +48,8 @@
48#include <linux/memblock.h> 48#include <linux/memblock.h>
49#include <linux/seq_file.h> 49#include <linux/seq_file.h>
50 50
51#include <trace/events/xen.h>
52
51#include <asm/pgtable.h> 53#include <asm/pgtable.h>
52#include <asm/tlbflush.h> 54#include <asm/tlbflush.h>
53#include <asm/fixmap.h> 55#include <asm/fixmap.h>
@@ -194,6 +196,8 @@ void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
194 struct multicall_space mcs; 196 struct multicall_space mcs;
195 struct mmu_update *u; 197 struct mmu_update *u;
196 198
199 trace_xen_mmu_set_domain_pte(ptep, pteval, domid);
200
197 mcs = xen_mc_entry(sizeof(*u)); 201 mcs = xen_mc_entry(sizeof(*u));
198 u = mcs.args; 202 u = mcs.args;
199 203
@@ -225,6 +229,24 @@ static void xen_extend_mmu_update(const struct mmu_update *update)
225 *u = *update; 229 *u = *update;
226} 230}
227 231
232static void xen_extend_mmuext_op(const struct mmuext_op *op)
233{
234 struct multicall_space mcs;
235 struct mmuext_op *u;
236
237 mcs = xen_mc_extend_args(__HYPERVISOR_mmuext_op, sizeof(*u));
238
239 if (mcs.mc != NULL) {
240 mcs.mc->args[1]++;
241 } else {
242 mcs = __xen_mc_entry(sizeof(*u));
243 MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
244 }
245
246 u = mcs.args;
247 *u = *op;
248}
249
228static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val) 250static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
229{ 251{
230 struct mmu_update u; 252 struct mmu_update u;
@@ -245,6 +267,8 @@ static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
245 267
246static void xen_set_pmd(pmd_t *ptr, pmd_t val) 268static void xen_set_pmd(pmd_t *ptr, pmd_t val)
247{ 269{
270 trace_xen_mmu_set_pmd(ptr, val);
271
248 /* If page is not pinned, we can just update the entry 272 /* If page is not pinned, we can just update the entry
249 directly */ 273 directly */
250 if (!xen_page_pinned(ptr)) { 274 if (!xen_page_pinned(ptr)) {
@@ -282,22 +306,30 @@ static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
282 return true; 306 return true;
283} 307}
284 308
285static void xen_set_pte(pte_t *ptep, pte_t pteval) 309static inline void __xen_set_pte(pte_t *ptep, pte_t pteval)
286{ 310{
287 if (!xen_batched_set_pte(ptep, pteval)) 311 if (!xen_batched_set_pte(ptep, pteval))
288 native_set_pte(ptep, pteval); 312 native_set_pte(ptep, pteval);
289} 313}
290 314
315static void xen_set_pte(pte_t *ptep, pte_t pteval)
316{
317 trace_xen_mmu_set_pte(ptep, pteval);
318 __xen_set_pte(ptep, pteval);
319}
320
291static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr, 321static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
292 pte_t *ptep, pte_t pteval) 322 pte_t *ptep, pte_t pteval)
293{ 323{
294 xen_set_pte(ptep, pteval); 324 trace_xen_mmu_set_pte_at(mm, addr, ptep, pteval);
325 __xen_set_pte(ptep, pteval);
295} 326}
296 327
297pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, 328pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
298 unsigned long addr, pte_t *ptep) 329 unsigned long addr, pte_t *ptep)
299{ 330{
300 /* Just return the pte as-is. We preserve the bits on commit */ 331 /* Just return the pte as-is. We preserve the bits on commit */
332 trace_xen_mmu_ptep_modify_prot_start(mm, addr, ptep, *ptep);
301 return *ptep; 333 return *ptep;
302} 334}
303 335
@@ -306,6 +338,7 @@ void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
306{ 338{
307 struct mmu_update u; 339 struct mmu_update u;
308 340
341 trace_xen_mmu_ptep_modify_prot_commit(mm, addr, ptep, pte);
309 xen_mc_batch(); 342 xen_mc_batch();
310 343
311 u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD; 344 u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
@@ -530,6 +563,8 @@ static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
530 563
531static void xen_set_pud(pud_t *ptr, pud_t val) 564static void xen_set_pud(pud_t *ptr, pud_t val)
532{ 565{
566 trace_xen_mmu_set_pud(ptr, val);
567
533 /* If page is not pinned, we can just update the entry 568 /* If page is not pinned, we can just update the entry
534 directly */ 569 directly */
535 if (!xen_page_pinned(ptr)) { 570 if (!xen_page_pinned(ptr)) {
@@ -543,17 +578,20 @@ static void xen_set_pud(pud_t *ptr, pud_t val)
543#ifdef CONFIG_X86_PAE 578#ifdef CONFIG_X86_PAE
544static void xen_set_pte_atomic(pte_t *ptep, pte_t pte) 579static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
545{ 580{
581 trace_xen_mmu_set_pte_atomic(ptep, pte);
546 set_64bit((u64 *)ptep, native_pte_val(pte)); 582 set_64bit((u64 *)ptep, native_pte_val(pte));
547} 583}
548 584
549static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) 585static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
550{ 586{
587 trace_xen_mmu_pte_clear(mm, addr, ptep);
551 if (!xen_batched_set_pte(ptep, native_make_pte(0))) 588 if (!xen_batched_set_pte(ptep, native_make_pte(0)))
552 native_pte_clear(mm, addr, ptep); 589 native_pte_clear(mm, addr, ptep);
553} 590}
554 591
555static void xen_pmd_clear(pmd_t *pmdp) 592static void xen_pmd_clear(pmd_t *pmdp)
556{ 593{
594 trace_xen_mmu_pmd_clear(pmdp);
557 set_pmd(pmdp, __pmd(0)); 595 set_pmd(pmdp, __pmd(0));
558} 596}
559#endif /* CONFIG_X86_PAE */ 597#endif /* CONFIG_X86_PAE */
@@ -629,6 +667,8 @@ static void xen_set_pgd(pgd_t *ptr, pgd_t val)
629{ 667{
630 pgd_t *user_ptr = xen_get_user_pgd(ptr); 668 pgd_t *user_ptr = xen_get_user_pgd(ptr);
631 669
670 trace_xen_mmu_set_pgd(ptr, user_ptr, val);
671
632 /* If page is not pinned, we can just update the entry 672 /* If page is not pinned, we can just update the entry
633 directly */ 673 directly */
634 if (!xen_page_pinned(ptr)) { 674 if (!xen_page_pinned(ptr)) {
@@ -788,14 +828,12 @@ static void xen_pte_unlock(void *v)
788 828
789static void xen_do_pin(unsigned level, unsigned long pfn) 829static void xen_do_pin(unsigned level, unsigned long pfn)
790{ 830{
791 struct mmuext_op *op; 831 struct mmuext_op op;
792 struct multicall_space mcs;
793 832
794 mcs = __xen_mc_entry(sizeof(*op)); 833 op.cmd = level;
795 op = mcs.args; 834 op.arg1.mfn = pfn_to_mfn(pfn);
796 op->cmd = level; 835
797 op->arg1.mfn = pfn_to_mfn(pfn); 836 xen_extend_mmuext_op(&op);
798 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
799} 837}
800 838
801static int xen_pin_page(struct mm_struct *mm, struct page *page, 839static int xen_pin_page(struct mm_struct *mm, struct page *page,
@@ -863,6 +901,8 @@ static int xen_pin_page(struct mm_struct *mm, struct page *page,
863 read-only, and can be pinned. */ 901 read-only, and can be pinned. */
864static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd) 902static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
865{ 903{
904 trace_xen_mmu_pgd_pin(mm, pgd);
905
866 xen_mc_batch(); 906 xen_mc_batch();
867 907
868 if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) { 908 if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
@@ -988,6 +1028,8 @@ static int xen_unpin_page(struct mm_struct *mm, struct page *page,
988/* Release a pagetables pages back as normal RW */ 1028/* Release a pagetables pages back as normal RW */
989static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd) 1029static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
990{ 1030{
1031 trace_xen_mmu_pgd_unpin(mm, pgd);
1032
991 xen_mc_batch(); 1033 xen_mc_batch();
992 1034
993 xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd))); 1035 xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
@@ -1196,6 +1238,8 @@ static void xen_flush_tlb(void)
1196 struct mmuext_op *op; 1238 struct mmuext_op *op;
1197 struct multicall_space mcs; 1239 struct multicall_space mcs;
1198 1240
1241 trace_xen_mmu_flush_tlb(0);
1242
1199 preempt_disable(); 1243 preempt_disable();
1200 1244
1201 mcs = xen_mc_entry(sizeof(*op)); 1245 mcs = xen_mc_entry(sizeof(*op));
@@ -1214,6 +1258,8 @@ static void xen_flush_tlb_single(unsigned long addr)
1214 struct mmuext_op *op; 1258 struct mmuext_op *op;
1215 struct multicall_space mcs; 1259 struct multicall_space mcs;
1216 1260
1261 trace_xen_mmu_flush_tlb_single(addr);
1262
1217 preempt_disable(); 1263 preempt_disable();
1218 1264
1219 mcs = xen_mc_entry(sizeof(*op)); 1265 mcs = xen_mc_entry(sizeof(*op));
@@ -1240,6 +1286,8 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
1240 } *args; 1286 } *args;
1241 struct multicall_space mcs; 1287 struct multicall_space mcs;
1242 1288
1289 trace_xen_mmu_flush_tlb_others(cpus, mm, va);
1290
1243 if (cpumask_empty(cpus)) 1291 if (cpumask_empty(cpus))
1244 return; /* nothing to do */ 1292 return; /* nothing to do */
1245 1293
@@ -1275,10 +1323,11 @@ static void set_current_cr3(void *v)
1275 1323
1276static void __xen_write_cr3(bool kernel, unsigned long cr3) 1324static void __xen_write_cr3(bool kernel, unsigned long cr3)
1277{ 1325{
1278 struct mmuext_op *op; 1326 struct mmuext_op op;
1279 struct multicall_space mcs;
1280 unsigned long mfn; 1327 unsigned long mfn;
1281 1328
1329 trace_xen_mmu_write_cr3(kernel, cr3);
1330
1282 if (cr3) 1331 if (cr3)
1283 mfn = pfn_to_mfn(PFN_DOWN(cr3)); 1332 mfn = pfn_to_mfn(PFN_DOWN(cr3));
1284 else 1333 else
@@ -1286,13 +1335,10 @@ static void __xen_write_cr3(bool kernel, unsigned long cr3)
1286 1335
1287 WARN_ON(mfn == 0 && kernel); 1336 WARN_ON(mfn == 0 && kernel);
1288 1337
1289 mcs = __xen_mc_entry(sizeof(*op)); 1338 op.cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
1290 1339 op.arg1.mfn = mfn;
1291 op = mcs.args;
1292 op->cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
1293 op->arg1.mfn = mfn;
1294 1340
1295 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF); 1341 xen_extend_mmuext_op(&op);
1296 1342
1297 if (kernel) { 1343 if (kernel) {
1298 percpu_write(xen_cr3, cr3); 1344 percpu_write(xen_cr3, cr3);
@@ -1451,19 +1497,52 @@ static void __init xen_release_pmd_init(unsigned long pfn)
1451 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn))); 1497 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1452} 1498}
1453 1499
1500static inline void __pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1501{
1502 struct multicall_space mcs;
1503 struct mmuext_op *op;
1504
1505 mcs = __xen_mc_entry(sizeof(*op));
1506 op = mcs.args;
1507 op->cmd = cmd;
1508 op->arg1.mfn = pfn_to_mfn(pfn);
1509
1510 MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
1511}
1512
1513static inline void __set_pfn_prot(unsigned long pfn, pgprot_t prot)
1514{
1515 struct multicall_space mcs;
1516 unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT);
1517
1518 mcs = __xen_mc_entry(0);
1519 MULTI_update_va_mapping(mcs.mc, (unsigned long)addr,
1520 pfn_pte(pfn, prot), 0);
1521}
1522
1454/* This needs to make sure the new pte page is pinned iff its being 1523/* This needs to make sure the new pte page is pinned iff its being
1455 attached to a pinned pagetable. */ 1524 attached to a pinned pagetable. */
1456static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned level) 1525static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
1526 unsigned level)
1457{ 1527{
1458 struct page *page = pfn_to_page(pfn); 1528 bool pinned = PagePinned(virt_to_page(mm->pgd));
1529
1530 trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned);
1531
1532 if (pinned) {
1533 struct page *page = pfn_to_page(pfn);
1459 1534
1460 if (PagePinned(virt_to_page(mm->pgd))) {
1461 SetPagePinned(page); 1535 SetPagePinned(page);
1462 1536
1463 if (!PageHighMem(page)) { 1537 if (!PageHighMem(page)) {
1464 make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn))); 1538 xen_mc_batch();
1539
1540 __set_pfn_prot(pfn, PAGE_KERNEL_RO);
1541
1465 if (level == PT_PTE && USE_SPLIT_PTLOCKS) 1542 if (level == PT_PTE && USE_SPLIT_PTLOCKS)
1466 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn); 1543 __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1544
1545 xen_mc_issue(PARAVIRT_LAZY_MMU);
1467 } else { 1546 } else {
1468 /* make sure there are no stray mappings of 1547 /* make sure there are no stray mappings of
1469 this page */ 1548 this page */
@@ -1483,15 +1562,23 @@ static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
1483} 1562}
1484 1563
1485/* This should never happen until we're OK to use struct page */ 1564/* This should never happen until we're OK to use struct page */
1486static void xen_release_ptpage(unsigned long pfn, unsigned level) 1565static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
1487{ 1566{
1488 struct page *page = pfn_to_page(pfn); 1567 struct page *page = pfn_to_page(pfn);
1568 bool pinned = PagePinned(page);
1489 1569
1490 if (PagePinned(page)) { 1570 trace_xen_mmu_release_ptpage(pfn, level, pinned);
1571
1572 if (pinned) {
1491 if (!PageHighMem(page)) { 1573 if (!PageHighMem(page)) {
1574 xen_mc_batch();
1575
1492 if (level == PT_PTE && USE_SPLIT_PTLOCKS) 1576 if (level == PT_PTE && USE_SPLIT_PTLOCKS)
1493 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn); 1577 __pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1494 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn))); 1578
1579 __set_pfn_prot(pfn, PAGE_KERNEL);
1580
1581 xen_mc_issue(PARAVIRT_LAZY_MMU);
1495 } 1582 }
1496 ClearPagePinned(page); 1583 ClearPagePinned(page);
1497 } 1584 }
diff --git a/arch/x86/xen/multicalls.c b/arch/x86/xen/multicalls.c
index 1b2b73ff0a6e..0d82003e76ad 100644
--- a/arch/x86/xen/multicalls.c
+++ b/arch/x86/xen/multicalls.c
@@ -30,12 +30,13 @@
30 30
31#define MC_BATCH 32 31#define MC_BATCH 32
32 32
33#define MC_DEBUG 1 33#define MC_DEBUG 0
34 34
35#define MC_ARGS (MC_BATCH * 16) 35#define MC_ARGS (MC_BATCH * 16)
36 36
37 37
38struct mc_buffer { 38struct mc_buffer {
39 unsigned mcidx, argidx, cbidx;
39 struct multicall_entry entries[MC_BATCH]; 40 struct multicall_entry entries[MC_BATCH];
40#if MC_DEBUG 41#if MC_DEBUG
41 struct multicall_entry debug[MC_BATCH]; 42 struct multicall_entry debug[MC_BATCH];
@@ -46,85 +47,15 @@ struct mc_buffer {
46 void (*fn)(void *); 47 void (*fn)(void *);
47 void *data; 48 void *data;
48 } callbacks[MC_BATCH]; 49 } callbacks[MC_BATCH];
49 unsigned mcidx, argidx, cbidx;
50}; 50};
51 51
52static DEFINE_PER_CPU(struct mc_buffer, mc_buffer); 52static DEFINE_PER_CPU(struct mc_buffer, mc_buffer);
53DEFINE_PER_CPU(unsigned long, xen_mc_irq_flags); 53DEFINE_PER_CPU(unsigned long, xen_mc_irq_flags);
54 54
55/* flush reasons 0- slots, 1- args, 2- callbacks */
56enum flush_reasons
57{
58 FL_SLOTS,
59 FL_ARGS,
60 FL_CALLBACKS,
61
62 FL_N_REASONS
63};
64
65#ifdef CONFIG_XEN_DEBUG_FS
66#define NHYPERCALLS 40 /* not really */
67
68static struct {
69 unsigned histo[MC_BATCH+1];
70
71 unsigned issued;
72 unsigned arg_total;
73 unsigned hypercalls;
74 unsigned histo_hypercalls[NHYPERCALLS];
75
76 unsigned flush[FL_N_REASONS];
77} mc_stats;
78
79static u8 zero_stats;
80
81static inline void check_zero(void)
82{
83 if (unlikely(zero_stats)) {
84 memset(&mc_stats, 0, sizeof(mc_stats));
85 zero_stats = 0;
86 }
87}
88
89static void mc_add_stats(const struct mc_buffer *mc)
90{
91 int i;
92
93 check_zero();
94
95 mc_stats.issued++;
96 mc_stats.hypercalls += mc->mcidx;
97 mc_stats.arg_total += mc->argidx;
98
99 mc_stats.histo[mc->mcidx]++;
100 for(i = 0; i < mc->mcidx; i++) {
101 unsigned op = mc->entries[i].op;
102 if (op < NHYPERCALLS)
103 mc_stats.histo_hypercalls[op]++;
104 }
105}
106
107static void mc_stats_flush(enum flush_reasons idx)
108{
109 check_zero();
110
111 mc_stats.flush[idx]++;
112}
113
114#else /* !CONFIG_XEN_DEBUG_FS */
115
116static inline void mc_add_stats(const struct mc_buffer *mc)
117{
118}
119
120static inline void mc_stats_flush(enum flush_reasons idx)
121{
122}
123#endif /* CONFIG_XEN_DEBUG_FS */
124
125void xen_mc_flush(void) 55void xen_mc_flush(void)
126{ 56{
127 struct mc_buffer *b = &__get_cpu_var(mc_buffer); 57 struct mc_buffer *b = &__get_cpu_var(mc_buffer);
58 struct multicall_entry *mc;
128 int ret = 0; 59 int ret = 0;
129 unsigned long flags; 60 unsigned long flags;
130 int i; 61 int i;
@@ -135,9 +66,26 @@ void xen_mc_flush(void)
135 something in the middle */ 66 something in the middle */
136 local_irq_save(flags); 67 local_irq_save(flags);
137 68
138 mc_add_stats(b); 69 trace_xen_mc_flush(b->mcidx, b->argidx, b->cbidx);
70
71 switch (b->mcidx) {
72 case 0:
73 /* no-op */
74 BUG_ON(b->argidx != 0);
75 break;
76
77 case 1:
78 /* Singleton multicall - bypass multicall machinery
79 and just do the call directly. */
80 mc = &b->entries[0];
81
82 mc->result = privcmd_call(mc->op,
83 mc->args[0], mc->args[1], mc->args[2],
84 mc->args[3], mc->args[4]);
85 ret = mc->result < 0;
86 break;
139 87
140 if (b->mcidx) { 88 default:
141#if MC_DEBUG 89#if MC_DEBUG
142 memcpy(b->debug, b->entries, 90 memcpy(b->debug, b->entries,
143 b->mcidx * sizeof(struct multicall_entry)); 91 b->mcidx * sizeof(struct multicall_entry));
@@ -164,11 +112,10 @@ void xen_mc_flush(void)
164 } 112 }
165 } 113 }
166#endif 114#endif
115 }
167 116
168 b->mcidx = 0; 117 b->mcidx = 0;
169 b->argidx = 0; 118 b->argidx = 0;
170 } else
171 BUG_ON(b->argidx != 0);
172 119
173 for (i = 0; i < b->cbidx; i++) { 120 for (i = 0; i < b->cbidx; i++) {
174 struct callback *cb = &b->callbacks[i]; 121 struct callback *cb = &b->callbacks[i];
@@ -188,18 +135,21 @@ struct multicall_space __xen_mc_entry(size_t args)
188 struct multicall_space ret; 135 struct multicall_space ret;
189 unsigned argidx = roundup(b->argidx, sizeof(u64)); 136 unsigned argidx = roundup(b->argidx, sizeof(u64));
190 137
138 trace_xen_mc_entry_alloc(args);
139
191 BUG_ON(preemptible()); 140 BUG_ON(preemptible());
192 BUG_ON(b->argidx >= MC_ARGS); 141 BUG_ON(b->argidx >= MC_ARGS);
193 142
194 if (b->mcidx == MC_BATCH || 143 if (unlikely(b->mcidx == MC_BATCH ||
195 (argidx + args) >= MC_ARGS) { 144 (argidx + args) >= MC_ARGS)) {
196 mc_stats_flush(b->mcidx == MC_BATCH ? FL_SLOTS : FL_ARGS); 145 trace_xen_mc_flush_reason((b->mcidx == MC_BATCH) ?
146 XEN_MC_FL_BATCH : XEN_MC_FL_ARGS);
197 xen_mc_flush(); 147 xen_mc_flush();
198 argidx = roundup(b->argidx, sizeof(u64)); 148 argidx = roundup(b->argidx, sizeof(u64));
199 } 149 }
200 150
201 ret.mc = &b->entries[b->mcidx]; 151 ret.mc = &b->entries[b->mcidx];
202#ifdef MC_DEBUG 152#if MC_DEBUG
203 b->caller[b->mcidx] = __builtin_return_address(0); 153 b->caller[b->mcidx] = __builtin_return_address(0);
204#endif 154#endif
205 b->mcidx++; 155 b->mcidx++;
@@ -218,20 +168,25 @@ struct multicall_space xen_mc_extend_args(unsigned long op, size_t size)
218 BUG_ON(preemptible()); 168 BUG_ON(preemptible());
219 BUG_ON(b->argidx >= MC_ARGS); 169 BUG_ON(b->argidx >= MC_ARGS);
220 170
221 if (b->mcidx == 0) 171 if (unlikely(b->mcidx == 0 ||
222 return ret; 172 b->entries[b->mcidx - 1].op != op)) {
223 173 trace_xen_mc_extend_args(op, size, XEN_MC_XE_BAD_OP);
224 if (b->entries[b->mcidx - 1].op != op) 174 goto out;
225 return ret; 175 }
226 176
227 if ((b->argidx + size) >= MC_ARGS) 177 if (unlikely((b->argidx + size) >= MC_ARGS)) {
228 return ret; 178 trace_xen_mc_extend_args(op, size, XEN_MC_XE_NO_SPACE);
179 goto out;
180 }
229 181
230 ret.mc = &b->entries[b->mcidx - 1]; 182 ret.mc = &b->entries[b->mcidx - 1];
231 ret.args = &b->args[b->argidx]; 183 ret.args = &b->args[b->argidx];
232 b->argidx += size; 184 b->argidx += size;
233 185
234 BUG_ON(b->argidx >= MC_ARGS); 186 BUG_ON(b->argidx >= MC_ARGS);
187
188 trace_xen_mc_extend_args(op, size, XEN_MC_XE_OK);
189out:
235 return ret; 190 return ret;
236} 191}
237 192
@@ -241,43 +196,13 @@ void xen_mc_callback(void (*fn)(void *), void *data)
241 struct callback *cb; 196 struct callback *cb;
242 197
243 if (b->cbidx == MC_BATCH) { 198 if (b->cbidx == MC_BATCH) {
244 mc_stats_flush(FL_CALLBACKS); 199 trace_xen_mc_flush_reason(XEN_MC_FL_CALLBACK);
245 xen_mc_flush(); 200 xen_mc_flush();
246 } 201 }
247 202
203 trace_xen_mc_callback(fn, data);
204
248 cb = &b->callbacks[b->cbidx++]; 205 cb = &b->callbacks[b->cbidx++];
249 cb->fn = fn; 206 cb->fn = fn;
250 cb->data = data; 207 cb->data = data;
251} 208}
252
253#ifdef CONFIG_XEN_DEBUG_FS
254
255static struct dentry *d_mc_debug;
256
257static int __init xen_mc_debugfs(void)
258{
259 struct dentry *d_xen = xen_init_debugfs();
260
261 if (d_xen == NULL)
262 return -ENOMEM;
263
264 d_mc_debug = debugfs_create_dir("multicalls", d_xen);
265
266 debugfs_create_u8("zero_stats", 0644, d_mc_debug, &zero_stats);
267
268 debugfs_create_u32("batches", 0444, d_mc_debug, &mc_stats.issued);
269 debugfs_create_u32("hypercalls", 0444, d_mc_debug, &mc_stats.hypercalls);
270 debugfs_create_u32("arg_total", 0444, d_mc_debug, &mc_stats.arg_total);
271
272 xen_debugfs_create_u32_array("batch_histo", 0444, d_mc_debug,
273 mc_stats.histo, MC_BATCH);
274 xen_debugfs_create_u32_array("hypercall_histo", 0444, d_mc_debug,
275 mc_stats.histo_hypercalls, NHYPERCALLS);
276 xen_debugfs_create_u32_array("flush_reasons", 0444, d_mc_debug,
277 mc_stats.flush, FL_N_REASONS);
278
279 return 0;
280}
281fs_initcall(xen_mc_debugfs);
282
283#endif /* CONFIG_XEN_DEBUG_FS */
diff --git a/arch/x86/xen/multicalls.h b/arch/x86/xen/multicalls.h
index 4ec8035e3216..dee79b78a90f 100644
--- a/arch/x86/xen/multicalls.h
+++ b/arch/x86/xen/multicalls.h
@@ -1,6 +1,8 @@
1#ifndef _XEN_MULTICALLS_H 1#ifndef _XEN_MULTICALLS_H
2#define _XEN_MULTICALLS_H 2#define _XEN_MULTICALLS_H
3 3
4#include <trace/events/xen.h>
5
4#include "xen-ops.h" 6#include "xen-ops.h"
5 7
6/* Multicalls */ 8/* Multicalls */
@@ -20,8 +22,10 @@ DECLARE_PER_CPU(unsigned long, xen_mc_irq_flags);
20static inline void xen_mc_batch(void) 22static inline void xen_mc_batch(void)
21{ 23{
22 unsigned long flags; 24 unsigned long flags;
25
23 /* need to disable interrupts until this entry is complete */ 26 /* need to disable interrupts until this entry is complete */
24 local_irq_save(flags); 27 local_irq_save(flags);
28 trace_xen_mc_batch(paravirt_get_lazy_mode());
25 __this_cpu_write(xen_mc_irq_flags, flags); 29 __this_cpu_write(xen_mc_irq_flags, flags);
26} 30}
27 31
@@ -37,6 +41,8 @@ void xen_mc_flush(void);
37/* Issue a multicall if we're not in a lazy mode */ 41/* Issue a multicall if we're not in a lazy mode */
38static inline void xen_mc_issue(unsigned mode) 42static inline void xen_mc_issue(unsigned mode)
39{ 43{
44 trace_xen_mc_issue(mode);
45
40 if ((paravirt_get_lazy_mode() & mode) == 0) 46 if ((paravirt_get_lazy_mode() & mode) == 0)
41 xen_mc_flush(); 47 xen_mc_flush();
42 48
diff --git a/arch/x86/xen/platform-pci-unplug.c b/arch/x86/xen/platform-pci-unplug.c
index 25c52f94a27c..ffcf2615640b 100644
--- a/arch/x86/xen/platform-pci-unplug.c
+++ b/arch/x86/xen/platform-pci-unplug.c
@@ -35,7 +35,7 @@ EXPORT_SYMBOL_GPL(xen_platform_pci_unplug);
35#ifdef CONFIG_XEN_PVHVM 35#ifdef CONFIG_XEN_PVHVM
36static int xen_emul_unplug; 36static int xen_emul_unplug;
37 37
38static int __init check_platform_magic(void) 38static int check_platform_magic(void)
39{ 39{
40 short magic; 40 short magic;
41 char protocol; 41 char protocol;
diff --git a/arch/x86/xen/trace.c b/arch/x86/xen/trace.c
new file mode 100644
index 000000000000..734beba2a08c
--- /dev/null
+++ b/arch/x86/xen/trace.c
@@ -0,0 +1,61 @@
1#include <linux/ftrace.h>
2
3#define N(x) [__HYPERVISOR_##x] = "("#x")"
4static const char *xen_hypercall_names[] = {
5 N(set_trap_table),
6 N(mmu_update),
7 N(set_gdt),
8 N(stack_switch),
9 N(set_callbacks),
10 N(fpu_taskswitch),
11 N(sched_op_compat),
12 N(dom0_op),
13 N(set_debugreg),
14 N(get_debugreg),
15 N(update_descriptor),
16 N(memory_op),
17 N(multicall),
18 N(update_va_mapping),
19 N(set_timer_op),
20 N(event_channel_op_compat),
21 N(xen_version),
22 N(console_io),
23 N(physdev_op_compat),
24 N(grant_table_op),
25 N(vm_assist),
26 N(update_va_mapping_otherdomain),
27 N(iret),
28 N(vcpu_op),
29 N(set_segment_base),
30 N(mmuext_op),
31 N(acm_op),
32 N(nmi_op),
33 N(sched_op),
34 N(callback_op),
35 N(xenoprof_op),
36 N(event_channel_op),
37 N(physdev_op),
38 N(hvm_op),
39
40/* Architecture-specific hypercall definitions. */
41 N(arch_0),
42 N(arch_1),
43 N(arch_2),
44 N(arch_3),
45 N(arch_4),
46 N(arch_5),
47 N(arch_6),
48 N(arch_7),
49};
50#undef N
51
52static const char *xen_hypercall_name(unsigned op)
53{
54 if (op < ARRAY_SIZE(xen_hypercall_names) && xen_hypercall_names[op] != NULL)
55 return xen_hypercall_names[op];
56
57 return "";
58}
59
60#define CREATE_TRACE_POINTS
61#include <trace/events/xen.h>
diff --git a/arch/x86/xen/vga.c b/arch/x86/xen/vga.c
new file mode 100644
index 000000000000..1cd7f4d11e29
--- /dev/null
+++ b/arch/x86/xen/vga.c
@@ -0,0 +1,67 @@
1#include <linux/screen_info.h>
2#include <linux/init.h>
3
4#include <asm/bootparam.h>
5#include <asm/setup.h>
6
7#include <xen/interface/xen.h>
8
9#include "xen-ops.h"
10
11void __init xen_init_vga(const struct dom0_vga_console_info *info, size_t size)
12{
13 struct screen_info *screen_info = &boot_params.screen_info;
14
15 /* This is drawn from a dump from vgacon:startup in
16 * standard Linux. */
17 screen_info->orig_video_mode = 3;
18 screen_info->orig_video_isVGA = 1;
19 screen_info->orig_video_lines = 25;
20 screen_info->orig_video_cols = 80;
21 screen_info->orig_video_ega_bx = 3;
22 screen_info->orig_video_points = 16;
23 screen_info->orig_y = screen_info->orig_video_lines - 1;
24
25 switch (info->video_type) {
26 case XEN_VGATYPE_TEXT_MODE_3:
27 if (size < offsetof(struct dom0_vga_console_info, u.text_mode_3)
28 + sizeof(info->u.text_mode_3))
29 break;
30 screen_info->orig_video_lines = info->u.text_mode_3.rows;
31 screen_info->orig_video_cols = info->u.text_mode_3.columns;
32 screen_info->orig_x = info->u.text_mode_3.cursor_x;
33 screen_info->orig_y = info->u.text_mode_3.cursor_y;
34 screen_info->orig_video_points =
35 info->u.text_mode_3.font_height;
36 break;
37
38 case XEN_VGATYPE_VESA_LFB:
39 if (size < offsetof(struct dom0_vga_console_info,
40 u.vesa_lfb.gbl_caps))
41 break;
42 screen_info->orig_video_isVGA = VIDEO_TYPE_VLFB;
43 screen_info->lfb_width = info->u.vesa_lfb.width;
44 screen_info->lfb_height = info->u.vesa_lfb.height;
45 screen_info->lfb_depth = info->u.vesa_lfb.bits_per_pixel;
46 screen_info->lfb_base = info->u.vesa_lfb.lfb_base;
47 screen_info->lfb_size = info->u.vesa_lfb.lfb_size;
48 screen_info->lfb_linelength = info->u.vesa_lfb.bytes_per_line;
49 screen_info->red_size = info->u.vesa_lfb.red_size;
50 screen_info->red_pos = info->u.vesa_lfb.red_pos;
51 screen_info->green_size = info->u.vesa_lfb.green_size;
52 screen_info->green_pos = info->u.vesa_lfb.green_pos;
53 screen_info->blue_size = info->u.vesa_lfb.blue_size;
54 screen_info->blue_pos = info->u.vesa_lfb.blue_pos;
55 screen_info->rsvd_size = info->u.vesa_lfb.rsvd_size;
56 screen_info->rsvd_pos = info->u.vesa_lfb.rsvd_pos;
57 if (size >= offsetof(struct dom0_vga_console_info,
58 u.vesa_lfb.gbl_caps)
59 + sizeof(info->u.vesa_lfb.gbl_caps))
60 screen_info->capabilities = info->u.vesa_lfb.gbl_caps;
61 if (size >= offsetof(struct dom0_vga_console_info,
62 u.vesa_lfb.mode_attrs)
63 + sizeof(info->u.vesa_lfb.mode_attrs))
64 screen_info->vesa_attributes = info->u.vesa_lfb.mode_attrs;
65 break;
66 }
67}
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 97dfdc8757b3..b095739ccd4c 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -88,6 +88,17 @@ static inline void xen_uninit_lock_cpu(int cpu)
88} 88}
89#endif 89#endif
90 90
91struct dom0_vga_console_info;
92
93#ifdef CONFIG_XEN_DOM0
94void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size);
95#else
96static inline void __init xen_init_vga(const struct dom0_vga_console_info *info,
97 size_t size)
98{
99}
100#endif
101
91/* Declare an asm function, along with symbols needed to make it 102/* Declare an asm function, along with symbols needed to make it
92 inlineable */ 103 inlineable */
93#define DECL_ASM(ret, name, ...) \ 104#define DECL_ASM(ret, name, ...) \
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 5d43c1f8ada8..c346ccdce0df 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -80,18 +80,7 @@ config XTENSA_UNALIGNED_USER
80 80
81 Say Y here to enable unaligned memory access in user space. 81 Say Y here to enable unaligned memory access in user space.
82 82
83config PREEMPT 83source "kernel/Kconfig.preempt"
84 bool "Preemptible Kernel"
85 help
86 This option reduces the latency of the kernel when reacting to
87 real-time or interactive events by allowing a low priority process to
88 be preempted even if it is in kernel mode executing a system call.
89 Unfortunately the kernel code has some race conditions if both
90 CONFIG_SMP and CONFIG_PREEMPT are enabled, so this option is
91 currently disabled if you are building an SMP kernel.
92
93 Say Y here if you are building a kernel for a desktop, embedded
94 or real-time system. Say N if you are unsure.
95 84
96config MATH_EMULATION 85config MATH_EMULATION
97 bool "Math emulation" 86 bool "Math emulation"